/* pcnet32.c: An AMD PCnet32 ethernet driver for linux. */
/*
 * Copyright 1996-1999 Thomas Bogendoerfer
 *
 * Derived from the lance driver written 1993,1994,1995 by Donald Becker.
 *
 * Copyright 1993 United States Government as represented by the
 * Director, National Security Agency.
 *
 * This software may be used and distributed according to the terms
 * of the GNU General Public License, incorporated herein by reference.
 *
 * This driver is for PCnet32 and PCnetPCI based ethercards
 */
/**************************************************************************
 * 23 Oct, 2000.
 * Fixed a few bugs related to running the controller in 32-bit mode.
 *
 * Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
 *
 *************************************************************************/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define DRV_NAME "pcnet32"
#define DRV_RELDATE "21.Apr.2008"
#define PFX DRV_NAME ": "

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/crc32.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/moduleparam.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/uaccess.h>

#include <asm/dma.h>
#include <asm/irq.h>

/*
 * PCI device identifiers for "new style" Linux PCI Device Drivers
 */
static const struct pci_device_id pcnet32_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE_HOME), },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE), },

	/*
	 * Adapters that were sold with IBM's RS/6000 or pSeries hardware have
	 * the incorrect vendor id.
	 */
	{ PCI_DEVICE(PCI_VENDOR_ID_TRIDENT, PCI_DEVICE_ID_AMD_LANCE),
	  .class = (PCI_CLASS_NETWORK_ETHERNET << 8), .class_mask = 0xffff00, },

	{ }	/* terminate list */
};

MODULE_DEVICE_TABLE(pci, pcnet32_pci_tbl);

static int cards_found;

/*
 * VLB I/O addresses
 */
static unsigned int pcnet32_portlist[] =
    { 0x300, 0x320, 0x340, 0x360, 0 };

static int pcnet32_debug;
static int tx_start = 1;	/* Mapping -- 0:20, 1:64, 2:128, 3:~220 (depends on chip version) */
static int pcnet32vlb;		/* check for VLB cards? */
static struct net_device *pcnet32_dev;

static int max_interrupt_work = 2;
static int rx_copybreak = 200;

#define PCNET32_PORT_AUI     0x00
#define PCNET32_PORT_10BT    0x01
#define PCNET32_PORT_GPSI    0x02
#define PCNET32_PORT_MII     0x03

#define PCNET32_PORT_PORTSEL 0x03
#define PCNET32_PORT_ASEL    0x04
#define PCNET32_PORT_100     0x40
#define PCNET32_PORT_FD      0x80

#define PCNET32_DMA_MASK 0xffffffff

#define PCNET32_WATCHDOG_TIMEOUT (jiffies + (2 * HZ))
#define PCNET32_BLINK_TIMEOUT	(jiffies + (HZ/4))

/*
 * table to translate option values from tulip
 * to internal options
 */
static const unsigned char options_mapping[] = {
	PCNET32_PORT_ASEL,			/*  0 Auto-select      */
	PCNET32_PORT_AUI,			/*  1 BNC/AUI          */
	PCNET32_PORT_AUI,			/*  2 AUI/BNC          */
	PCNET32_PORT_ASEL,			/*  3 not supported    */
	PCNET32_PORT_10BT | PCNET32_PORT_FD,	/*  4 10baseT-FD       */
	PCNET32_PORT_ASEL,			/*  5 not supported    */
	PCNET32_PORT_ASEL,			/*  6 not supported    */
	PCNET32_PORT_ASEL,			/*  7 not supported    */
	PCNET32_PORT_ASEL,			/*  8 not supported    */
	PCNET32_PORT_MII,			/*  9 MII 10baseT      */
	PCNET32_PORT_MII | PCNET32_PORT_FD,	/* 10 MII 10baseT-FD   */
	PCNET32_PORT_MII,			/* 11 MII (autosel)    */
	PCNET32_PORT_10BT,			/* 12 10BaseT          */
	PCNET32_PORT_MII | PCNET32_PORT_100,	/* 13 MII 100BaseTx    */
	/* 14 MII 100BaseTx-FD */
	PCNET32_PORT_MII | PCNET32_PORT_100 | PCNET32_PORT_FD,
	PCNET32_PORT_ASEL			/* 15 not supported    */
};
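
/*
 * Example: loading the module with "options=4" asks for 10BASE-T full
 * duplex (PCNET32_PORT_10BT | PCNET32_PORT_FD) on the first card, since
 * the probe code uses the options[] module parameter (declared below)
 * as an index into this table.
 */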

static const char pcnet32_gstrings_test[][ETH_GSTRING_LEN] = {
	"Loopback test (offline)"
};

#define PCNET32_TEST_LEN	ARRAY_SIZE(pcnet32_gstrings_test)

#define PCNET32_NUM_REGS	136

#define MAX_UNITS 8		/* more are supported; this only limits the module option arrays */
static int options[MAX_UNITS];
static int full_duplex[MAX_UNITS];
static int homepna[MAX_UNITS];

/*
 * Theory of Operation
 *
 * This driver uses the same software structure as the normal lance
 * driver. So look for a verbose description in lance.c. The difference
 * from the normal lance driver is the use of the 32-bit mode of the
 * PCnet32 and PCnetPCI chips. Because these are 32-bit chips, there is
 * no 16MB limitation and we don't need bounce buffers.
 */

/*
 * Set the number of Tx and Rx buffers, using Log_2(# buffers).
 * With the defaults below that is 16 Tx buffers and 32 Rx buffers,
 * i.e. 4 (16 == 2^4) and 5 (32 == 2^5).
 */
#ifndef PCNET32_LOG_TX_BUFFERS
#define PCNET32_LOG_TX_BUFFERS		4
#define PCNET32_LOG_RX_BUFFERS		5
#define PCNET32_LOG_MAX_TX_BUFFERS	9	/* 2^9 == 512 */
#define PCNET32_LOG_MAX_RX_BUFFERS	9
#endif

#define TX_RING_SIZE		(1 << (PCNET32_LOG_TX_BUFFERS))
#define TX_MAX_RING_SIZE	(1 << (PCNET32_LOG_MAX_TX_BUFFERS))

#define RX_RING_SIZE		(1 << (PCNET32_LOG_RX_BUFFERS))
#define RX_MAX_RING_SIZE	(1 << (PCNET32_LOG_MAX_RX_BUFFERS))
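
/*
 * With the defaults above this yields TX_RING_SIZE == 16 and
 * RX_RING_SIZE == 32; pcnet32_realloc_tx_ring()/_rx_ring() below can
 * later grow either ring up to the 512-entry maximum.
 */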

#define PKT_BUF_SKB		1544
/* actual buffer length after being aligned */
#define PKT_BUF_SIZE		(PKT_BUF_SKB - NET_IP_ALIGN)
/* chip wants two's complement of the (aligned) buffer length */
#define NEG_BUF_SIZE		(NET_IP_ALIGN - PKT_BUF_SKB)
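
/*
 * Worked example, assuming NET_IP_ALIGN == 2: PKT_BUF_SIZE is
 * 1544 - 2 == 1542 and NEG_BUF_SIZE is -1542, which lands in the
 * descriptor's 16-bit buf_length field as 0xf9fa.
 */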

/* Offsets from base I/O address. */
#define PCNET32_WIO_RDP		0x10
#define PCNET32_WIO_RAP		0x12
#define PCNET32_WIO_RESET	0x14
#define PCNET32_WIO_BDP		0x16

#define PCNET32_DWIO_RDP	0x10
#define PCNET32_DWIO_RAP	0x14
#define PCNET32_DWIO_RESET	0x18
#define PCNET32_DWIO_BDP	0x1C

#define PCNET32_TOTAL_SIZE	0x20

#define CSR0		0
#define CSR0_INIT	0x1
#define CSR0_START	0x2
#define CSR0_STOP	0x4
#define CSR0_TXPOLL	0x8
#define CSR0_INTEN	0x40
#define CSR0_IDON	0x0100
#define CSR0_NORMAL	(CSR0_START | CSR0_INTEN)
#define PCNET32_INIT_LOW	1
#define PCNET32_INIT_HIGH	2
#define CSR3		3
#define CSR4		4
#define CSR5		5
#define CSR5_SUSPEND	0x0001
#define CSR15		15
#define PCNET32_MC_FILTER	8

#define PCNET32_79C970A	0x2621

/* The PCNET32 Rx and Tx ring descriptors. */
struct pcnet32_rx_head {
	__le32	base;
	__le16	buf_length;	/* two's complement of length */
	__le16	status;
	__le32	msg_length;
	__le32	reserved;
};

struct pcnet32_tx_head {
	__le32	base;
	__le16	length;		/* two's complement of length */
	__le16	status;
	__le32	misc;
	__le32	reserved;
};

/* The PCNET32 32-Bit initialization block, described in databook. */
struct pcnet32_init_block {
	__le16	mode;
	__le16	tlen_rlen;
	u8	phys_addr[6];
	__le16	reserved;
	__le32	filter[2];
	/* Receive and transmit ring base, along with extra bits. */
	__le32	rx_ring;
	__le32	tx_ring;
};
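
/*
 * Note: tlen_rlen packs the log2 ring sizes (RLEN in bits 4-7, TLEN in
 * bits 12-15), matching lp->rx_len_bits (size << 4) and lp->tx_len_bits
 * (size << 12) set up by the realloc helpers below.
 */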

/* PCnet32 access functions */
struct pcnet32_access {
	u16	(*read_csr) (unsigned long, int);
	void	(*write_csr) (unsigned long, int, u16);
	u16	(*read_bcr) (unsigned long, int);
	void	(*write_bcr) (unsigned long, int, u16);
	u16	(*read_rap) (unsigned long);
	void	(*write_rap) (unsigned long, u16);
	void	(*reset) (unsigned long);
};

/*
 * The first field of pcnet32_private is read by the ethernet device
 * so the structure should be allocated using dma_alloc_coherent().
 */
struct pcnet32_private {
	struct pcnet32_init_block *init_block;
	/* The Tx and Rx ring entries must be aligned on 16-byte boundaries in 32bit mode. */
	struct pcnet32_rx_head	*rx_ring;
	struct pcnet32_tx_head	*tx_ring;
	dma_addr_t		init_dma_addr;	/* DMA address of beginning of the init block,
						   returned by dma_alloc_coherent */
	struct pci_dev		*pci_dev;
	const char		*name;
	/* The saved address of a sent-in-place packet/buffer, for dev_kfree_skb(). */
	struct sk_buff		**tx_skbuff;
	struct sk_buff		**rx_skbuff;
	dma_addr_t		*tx_dma_addr;
	dma_addr_t		*rx_dma_addr;
	const struct pcnet32_access *a;
	spinlock_t		lock;		/* Guard lock */
	unsigned int		cur_rx, cur_tx;	/* The next free ring entry */
	unsigned int		rx_ring_size;	/* current rx ring size */
	unsigned int		tx_ring_size;	/* current tx ring size */
	unsigned int		rx_mod_mask;	/* rx ring modular mask */
	unsigned int		tx_mod_mask;	/* tx ring modular mask */
	unsigned short		rx_len_bits;
	unsigned short		tx_len_bits;
	dma_addr_t		rx_ring_dma_addr;
	dma_addr_t		tx_ring_dma_addr;
	unsigned int		dirty_rx,	/* ring entries to be freed. */
				dirty_tx;

	struct net_device	*dev;
	struct napi_struct	napi;
	char			tx_full;
	char			phycount;	/* number of phys found */
	int			options;
	unsigned int		shared_irq:1,	/* shared irq possible */
				dxsuflo:1,	/* disable transmit stop on uflo */
				mii:1,		/* mii port available */
				autoneg:1,	/* autoneg enabled */
				port_tp:1,	/* port set to TP */
				fdx:1;		/* full duplex enabled */
	struct net_device	*next;
	struct mii_if_info	mii_if;
	struct timer_list	watchdog_timer;
	u32			msg_enable;	/* debug message level */

	/* each bit indicates an available PHY */
	u32			phymask;
	unsigned short		chip_version;	/* which variant this is */

	/* saved registers during ethtool blink */
	u16			save_regs[4];
};

static int pcnet32_probe_pci(struct pci_dev *, const struct pci_device_id *);
static int pcnet32_probe1(unsigned long, int, struct pci_dev *);
static int pcnet32_open(struct net_device *);
static int pcnet32_init_ring(struct net_device *);
static netdev_tx_t pcnet32_start_xmit(struct sk_buff *,
				      struct net_device *);
static void pcnet32_tx_timeout(struct net_device *dev, unsigned int txqueue);
static irqreturn_t pcnet32_interrupt(int, void *);
static int pcnet32_close(struct net_device *);
static struct net_device_stats *pcnet32_get_stats(struct net_device *);
static void pcnet32_load_multicast(struct net_device *dev);
static void pcnet32_set_multicast_list(struct net_device *);
static int pcnet32_ioctl(struct net_device *, struct ifreq *, int);
static void pcnet32_watchdog(struct timer_list *);
static int mdio_read(struct net_device *dev, int phy_id, int reg_num);
static void mdio_write(struct net_device *dev, int phy_id, int reg_num,
		       int val);
static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits);
static void pcnet32_ethtool_test(struct net_device *dev,
				 struct ethtool_test *eth_test, u64 *data);
static int pcnet32_loopback_test(struct net_device *dev, uint64_t *data1);
static int pcnet32_get_regs_len(struct net_device *dev);
static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			     void *ptr);
static void pcnet32_purge_tx_ring(struct net_device *dev);
static int pcnet32_alloc_ring(struct net_device *dev, const char *name);
static void pcnet32_free_ring(struct net_device *dev);
static void pcnet32_check_media(struct net_device *dev, int verbose);

static u16 pcnet32_wio_read_csr(unsigned long addr, int index)
{
	outw(index, addr + PCNET32_WIO_RAP);
	return inw(addr + PCNET32_WIO_RDP);
}

static void pcnet32_wio_write_csr(unsigned long addr, int index, u16 val)
{
	outw(index, addr + PCNET32_WIO_RAP);
	outw(val, addr + PCNET32_WIO_RDP);
}

static u16 pcnet32_wio_read_bcr(unsigned long addr, int index)
{
	outw(index, addr + PCNET32_WIO_RAP);
	return inw(addr + PCNET32_WIO_BDP);
}

static void pcnet32_wio_write_bcr(unsigned long addr, int index, u16 val)
{
	outw(index, addr + PCNET32_WIO_RAP);
	outw(val, addr + PCNET32_WIO_BDP);
}

static u16 pcnet32_wio_read_rap(unsigned long addr)
{
	return inw(addr + PCNET32_WIO_RAP);
}

static void pcnet32_wio_write_rap(unsigned long addr, u16 val)
{
	outw(val, addr + PCNET32_WIO_RAP);
}

static void pcnet32_wio_reset(unsigned long addr)
{
	inw(addr + PCNET32_WIO_RESET);
}

static int pcnet32_wio_check(unsigned long addr)
{
	outw(88, addr + PCNET32_WIO_RAP);
	return inw(addr + PCNET32_WIO_RAP) == 88;
}
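
/*
 * RAP is readable on these chips, so writing an arbitrary value (88)
 * and reading it back tells us whether the device is answering 16-bit
 * (word) I/O at this address; pcnet32_dwio_check() below plays the
 * same trick for 32-bit (dword) mode.
 */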

static const struct pcnet32_access pcnet32_wio = {
	.read_csr = pcnet32_wio_read_csr,
	.write_csr = pcnet32_wio_write_csr,
	.read_bcr = pcnet32_wio_read_bcr,
	.write_bcr = pcnet32_wio_write_bcr,
	.read_rap = pcnet32_wio_read_rap,
	.write_rap = pcnet32_wio_write_rap,
	.reset = pcnet32_wio_reset
};

static u16 pcnet32_dwio_read_csr(unsigned long addr, int index)
{
	outl(index, addr + PCNET32_DWIO_RAP);
	return inl(addr + PCNET32_DWIO_RDP) & 0xffff;
}

static void pcnet32_dwio_write_csr(unsigned long addr, int index, u16 val)
{
	outl(index, addr + PCNET32_DWIO_RAP);
	outl(val, addr + PCNET32_DWIO_RDP);
}

static u16 pcnet32_dwio_read_bcr(unsigned long addr, int index)
{
	outl(index, addr + PCNET32_DWIO_RAP);
	return inl(addr + PCNET32_DWIO_BDP) & 0xffff;
}

static void pcnet32_dwio_write_bcr(unsigned long addr, int index, u16 val)
{
	outl(index, addr + PCNET32_DWIO_RAP);
	outl(val, addr + PCNET32_DWIO_BDP);
}

static u16 pcnet32_dwio_read_rap(unsigned long addr)
{
	return inl(addr + PCNET32_DWIO_RAP) & 0xffff;
}

static void pcnet32_dwio_write_rap(unsigned long addr, u16 val)
{
	outl(val, addr + PCNET32_DWIO_RAP);
}

static void pcnet32_dwio_reset(unsigned long addr)
{
	inl(addr + PCNET32_DWIO_RESET);
}

static int pcnet32_dwio_check(unsigned long addr)
{
	outl(88, addr + PCNET32_DWIO_RAP);
	return (inl(addr + PCNET32_DWIO_RAP) & 0xffff) == 88;
}

static const struct pcnet32_access pcnet32_dwio = {
	.read_csr = pcnet32_dwio_read_csr,
	.write_csr = pcnet32_dwio_write_csr,
	.read_bcr = pcnet32_dwio_read_bcr,
	.write_bcr = pcnet32_dwio_write_bcr,
	.read_rap = pcnet32_dwio_read_rap,
	.write_rap = pcnet32_dwio_write_rap,
	.reset = pcnet32_dwio_reset
};
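
/*
 * Sketch of the indirection in use, so the rest of the driver never
 * cares which I/O mode the chip is in:
 *
 *	u16 csr0 = lp->a->read_csr(dev->base_addr, CSR0);
 */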

static void pcnet32_netif_stop(struct net_device *dev)
{
	struct pcnet32_private *lp = netdev_priv(dev);

	netif_trans_update(dev);	/* prevent tx timeout */
	napi_disable(&lp->napi);
	netif_tx_disable(dev);
}

static void pcnet32_netif_start(struct net_device *dev)
{
	struct pcnet32_private *lp = netdev_priv(dev);
	ulong ioaddr = dev->base_addr;
	u16 val;

	netif_wake_queue(dev);
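	/*
	 * The upper byte of CSR3 holds the interrupt mask bits
	 * (IDONM, TINTM, RINTM, etc.); clearing it below unmasks
	 * all interrupt sources again.
	 */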
	val = lp->a->read_csr(ioaddr, CSR3);
	val &= 0x00ff;
	lp->a->write_csr(ioaddr, CSR3, val);
	napi_enable(&lp->napi);
}

/*
 * Allocate space for the new sized tx ring.
 * Free old resources.
 * Save new resources.
 * Any failure keeps old resources.
 * Must be called with lp->lock held.
 */
static void pcnet32_realloc_tx_ring(struct net_device *dev,
				    struct pcnet32_private *lp,
				    unsigned int size)
{
	dma_addr_t new_ring_dma_addr;
	dma_addr_t *new_dma_addr_list;
	struct pcnet32_tx_head *new_tx_ring;
	struct sk_buff **new_skb_list;
	unsigned int entries = BIT(size);

	pcnet32_purge_tx_ring(dev);

	new_tx_ring =
		dma_alloc_coherent(&lp->pci_dev->dev,
				   sizeof(struct pcnet32_tx_head) * entries,
				   &new_ring_dma_addr, GFP_ATOMIC);
	if (new_tx_ring == NULL)
		return;

	new_dma_addr_list = kcalloc(entries, sizeof(dma_addr_t), GFP_ATOMIC);
	if (!new_dma_addr_list)
		goto free_new_tx_ring;

	new_skb_list = kcalloc(entries, sizeof(struct sk_buff *), GFP_ATOMIC);
	if (!new_skb_list)
		goto free_new_lists;

	kfree(lp->tx_skbuff);
	kfree(lp->tx_dma_addr);
	dma_free_coherent(&lp->pci_dev->dev,
			  sizeof(struct pcnet32_tx_head) * lp->tx_ring_size,
			  lp->tx_ring, lp->tx_ring_dma_addr);

	lp->tx_ring_size = entries;
	lp->tx_mod_mask = lp->tx_ring_size - 1;
	lp->tx_len_bits = (size << 12);
	lp->tx_ring = new_tx_ring;
	lp->tx_ring_dma_addr = new_ring_dma_addr;
	lp->tx_dma_addr = new_dma_addr_list;
	lp->tx_skbuff = new_skb_list;
	return;

free_new_lists:
	kfree(new_dma_addr_list);
free_new_tx_ring:
	dma_free_coherent(&lp->pci_dev->dev,
			  sizeof(struct pcnet32_tx_head) * entries,
			  new_tx_ring, new_ring_dma_addr);
}

/*
 * Allocate space for the new sized rx ring.
 * Re-use old receive buffers.
 *   alloc extra buffers
 *   free unneeded buffers
 * Save new resources.
 * Any failure keeps old resources.
 * Must be called with lp->lock held.
 */
static void pcnet32_realloc_rx_ring(struct net_device *dev,
				    struct pcnet32_private *lp,
				    unsigned int size)
{
	dma_addr_t new_ring_dma_addr;
	dma_addr_t *new_dma_addr_list;
	struct pcnet32_rx_head *new_rx_ring;
	struct sk_buff **new_skb_list;
	int new, overlap;
	unsigned int entries = BIT(size);

	new_rx_ring =
		dma_alloc_coherent(&lp->pci_dev->dev,
				   sizeof(struct pcnet32_rx_head) * entries,
				   &new_ring_dma_addr, GFP_ATOMIC);
	if (new_rx_ring == NULL)
		return;

	new_dma_addr_list = kcalloc(entries, sizeof(dma_addr_t), GFP_ATOMIC);
	if (!new_dma_addr_list)
		goto free_new_rx_ring;

	new_skb_list = kcalloc(entries, sizeof(struct sk_buff *), GFP_ATOMIC);
	if (!new_skb_list)
		goto free_new_lists;

	/* first copy the current receive buffers */
	overlap = min(entries, lp->rx_ring_size);
	for (new = 0; new < overlap; new++) {
		new_rx_ring[new] = lp->rx_ring[new];
		new_dma_addr_list[new] = lp->rx_dma_addr[new];
		new_skb_list[new] = lp->rx_skbuff[new];
	}
	/* now allocate any new buffers needed */
	for (; new < entries; new++) {
		struct sk_buff *rx_skbuff;
		new_skb_list[new] = netdev_alloc_skb(dev, PKT_BUF_SKB);
		rx_skbuff = new_skb_list[new];
		if (!rx_skbuff) {
			/* keep the original lists and buffers */
			netif_err(lp, drv, dev, "%s netdev_alloc_skb failed\n",
				  __func__);
			goto free_all_new;
		}
		skb_reserve(rx_skbuff, NET_IP_ALIGN);

		new_dma_addr_list[new] =
			dma_map_single(&lp->pci_dev->dev, rx_skbuff->data,
				       PKT_BUF_SIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(&lp->pci_dev->dev, new_dma_addr_list[new])) {
			netif_err(lp, drv, dev, "%s dma mapping failed\n",
				  __func__);
			dev_kfree_skb(new_skb_list[new]);
			goto free_all_new;
		}
		new_rx_ring[new].base = cpu_to_le32(new_dma_addr_list[new]);
		new_rx_ring[new].buf_length = cpu_to_le16(NEG_BUF_SIZE);
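		/* 0x8000 is the OWN bit: hand the descriptor to the chip */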
		new_rx_ring[new].status = cpu_to_le16(0x8000);
	}
	/* and free any unneeded buffers */
	for (; new < lp->rx_ring_size; new++) {
		if (lp->rx_skbuff[new]) {
			if (!dma_mapping_error(&lp->pci_dev->dev, lp->rx_dma_addr[new]))
				dma_unmap_single(&lp->pci_dev->dev,
						 lp->rx_dma_addr[new],
						 PKT_BUF_SIZE,
						 DMA_FROM_DEVICE);
			dev_kfree_skb(lp->rx_skbuff[new]);
		}
	}

	kfree(lp->rx_skbuff);
	kfree(lp->rx_dma_addr);
	dma_free_coherent(&lp->pci_dev->dev,
			  sizeof(struct pcnet32_rx_head) * lp->rx_ring_size,
			  lp->rx_ring, lp->rx_ring_dma_addr);

	lp->rx_ring_size = entries;
	lp->rx_mod_mask = lp->rx_ring_size - 1;
	lp->rx_len_bits = (size << 4);
	lp->rx_ring = new_rx_ring;
	lp->rx_ring_dma_addr = new_ring_dma_addr;
	lp->rx_dma_addr = new_dma_addr_list;
	lp->rx_skbuff = new_skb_list;
	return;

free_all_new:
	while (--new >= lp->rx_ring_size) {
		if (new_skb_list[new]) {
			if (!dma_mapping_error(&lp->pci_dev->dev, new_dma_addr_list[new]))
				dma_unmap_single(&lp->pci_dev->dev,
						 new_dma_addr_list[new],
						 PKT_BUF_SIZE,
						 DMA_FROM_DEVICE);
			dev_kfree_skb(new_skb_list[new]);
		}
	}
	kfree(new_skb_list);
free_new_lists:
	kfree(new_dma_addr_list);
free_new_rx_ring:
	dma_free_coherent(&lp->pci_dev->dev,
			  sizeof(struct pcnet32_rx_head) * entries,
			  new_rx_ring, new_ring_dma_addr);
}

static void pcnet32_purge_rx_ring(struct net_device *dev)
{
	struct pcnet32_private *lp = netdev_priv(dev);
	int i;

	/* free all allocated skbuffs */
	for (i = 0; i < lp->rx_ring_size; i++) {
		lp->rx_ring[i].status = 0;	/* CPU owns buffer */
		wmb();		/* Make sure adapter sees owner change */
		if (lp->rx_skbuff[i]) {
			if (!dma_mapping_error(&lp->pci_dev->dev, lp->rx_dma_addr[i]))
				dma_unmap_single(&lp->pci_dev->dev,
						 lp->rx_dma_addr[i],
						 PKT_BUF_SIZE,
						 DMA_FROM_DEVICE);
			dev_kfree_skb_any(lp->rx_skbuff[i]);
		}
		lp->rx_skbuff[i] = NULL;
		lp->rx_dma_addr[i] = 0;
	}
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void pcnet32_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	pcnet32_interrupt(0, dev);
	enable_irq(dev->irq);
}
#endif

/*
 * lp->lock must be held.
 */
static int pcnet32_suspend(struct net_device *dev, unsigned long *flags,
			   int can_sleep)
{
	int csr5;
	struct pcnet32_private *lp = netdev_priv(dev);
	const struct pcnet32_access *a = lp->a;
	ulong ioaddr = dev->base_addr;
	int ticks;

	/* really old chips have to be stopped. */
	if (lp->chip_version < PCNET32_79C970A)
		return 0;

	/* set SUSPEND (SPND) - CSR5 bit 0 */
	csr5 = a->read_csr(ioaddr, CSR5);
	a->write_csr(ioaddr, CSR5, csr5 | CSR5_SUSPEND);

	/* poll waiting for bit to be set */
	ticks = 0;
	while (!(a->read_csr(ioaddr, CSR5) & CSR5_SUSPEND)) {
		spin_unlock_irqrestore(&lp->lock, *flags);
		if (can_sleep)
			msleep(1);
		else
			mdelay(1);
		spin_lock_irqsave(&lp->lock, *flags);
		ticks++;
		if (ticks > 200) {
			netif_printk(lp, hw, KERN_DEBUG, dev,
				     "Error getting into suspend!\n");
			return 0;
		}
	}
	return 1;
}

static void pcnet32_clr_suspend(struct pcnet32_private *lp, ulong ioaddr)
{
	int csr5 = lp->a->read_csr(ioaddr, CSR5);
	/* clear SUSPEND (SPND) - CSR5 bit 0 */
	lp->a->write_csr(ioaddr, CSR5, csr5 & ~CSR5_SUSPEND);
}
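
/*
 * Typical calling pattern around register updates (a sketch; compare
 * pcnet32_set_link_ksettings() below):
 *
 *	spin_lock_irqsave(&lp->lock, flags);
 *	suspended = pcnet32_suspend(dev, &flags, 0);
 *	... touch CSRs/BCRs ...
 *	if (suspended)
 *		pcnet32_clr_suspend(lp, ioaddr);
 *	spin_unlock_irqrestore(&lp->lock, flags);
 */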
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) static int pcnet32_get_link_ksettings(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) struct ethtool_link_ksettings *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) struct pcnet32_private *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) spin_lock_irqsave(&lp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) if (lp->mii) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) mii_ethtool_get_link_ksettings(&lp->mii_if, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) } else if (lp->chip_version == PCNET32_79C970A) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) if (lp->autoneg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) cmd->base.autoneg = AUTONEG_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) if (lp->a->read_bcr(dev->base_addr, 4) == 0xc0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) cmd->base.port = PORT_AUI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) cmd->base.port = PORT_TP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) cmd->base.autoneg = AUTONEG_DISABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) cmd->base.port = lp->port_tp ? PORT_TP : PORT_AUI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) cmd->base.duplex = lp->fdx ? DUPLEX_FULL : DUPLEX_HALF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) cmd->base.speed = SPEED_10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) ethtool_convert_legacy_u32_to_link_mode(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) cmd->link_modes.supported,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) SUPPORTED_TP | SUPPORTED_AUI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) spin_unlock_irqrestore(&lp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) static int pcnet32_set_link_ksettings(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) const struct ethtool_link_ksettings *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) struct pcnet32_private *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) ulong ioaddr = dev->base_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) int r = -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) int suspended, bcr2, bcr9, csr15;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) spin_lock_irqsave(&lp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) if (lp->mii) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) r = mii_ethtool_set_link_ksettings(&lp->mii_if, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) } else if (lp->chip_version == PCNET32_79C970A) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) suspended = pcnet32_suspend(dev, &flags, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) if (!suspended)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) lp->autoneg = cmd->base.autoneg == AUTONEG_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) bcr2 = lp->a->read_bcr(ioaddr, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) if (cmd->base.autoneg == AUTONEG_ENABLE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) lp->a->write_bcr(ioaddr, 2, bcr2 | 0x0002);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) lp->a->write_bcr(ioaddr, 2, bcr2 & ~0x0002);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) lp->port_tp = cmd->base.port == PORT_TP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) csr15 = lp->a->read_csr(ioaddr, CSR15) & ~0x0180;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) if (cmd->base.port == PORT_TP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) csr15 |= 0x0080;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) lp->a->write_csr(ioaddr, CSR15, csr15);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) lp->init_block->mode = cpu_to_le16(csr15);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) lp->fdx = cmd->base.duplex == DUPLEX_FULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) bcr9 = lp->a->read_bcr(ioaddr, 9) & ~0x0003;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) if (cmd->base.duplex == DUPLEX_FULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) bcr9 |= 0x0003;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) lp->a->write_bcr(ioaddr, 9, bcr9);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) if (suspended)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) pcnet32_clr_suspend(lp, ioaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) else if (netif_running(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) pcnet32_restart(dev, CSR0_NORMAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) r = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) spin_unlock_irqrestore(&lp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) static void pcnet32_get_drvinfo(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) struct ethtool_drvinfo *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) struct pcnet32_private *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799)
	strscpy(info->driver, DRV_NAME, sizeof(info->driver));
	if (lp->pci_dev)
		strscpy(info->bus_info, pci_name(lp->pci_dev),
			sizeof(info->bus_info));
	else
		snprintf(info->bus_info, sizeof(info->bus_info),
			 "VLB 0x%lx", dev->base_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808)
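/*
 * Link state comes from the MII PHY when one is present.  On a plain
 * 79C970A, link can only be sensed when the port has been forced to TP
 * (AUI has no link detection, so it is always reported up).  Both here
 * and for the later non-MII chips, BCR4 (the LED0 status register)
 * still reading its 0xc0 reset default is taken to mean "no link".
 */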
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) static u32 pcnet32_get_link(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) struct pcnet32_private *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) spin_lock_irqsave(&lp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) if (lp->mii) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) r = mii_link_ok(&lp->mii_if);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) } else if (lp->chip_version == PCNET32_79C970A) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) ulong ioaddr = dev->base_addr; /* card base I/O address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) /* only read link if port is set to TP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) if (!lp->autoneg && lp->port_tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) r = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) else /* link always up for AUI port or port auto select */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) r = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) } else if (lp->chip_version > PCNET32_79C970A) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) ulong ioaddr = dev->base_addr; /* card base I/O address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) r = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
	} else {	/* cannot detect link on really old chips */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) r = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) spin_unlock_irqrestore(&lp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) static u32 pcnet32_get_msglevel(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) struct pcnet32_private *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) return lp->msg_enable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) static void pcnet32_set_msglevel(struct net_device *dev, u32 value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) struct pcnet32_private *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) lp->msg_enable = value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) static int pcnet32_nway_reset(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) struct pcnet32_private *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) int r = -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) if (lp->mii) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) spin_lock_irqsave(&lp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) r = mii_nway_restart(&lp->mii_if);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) spin_unlock_irqrestore(&lp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) static void pcnet32_get_ringparam(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) struct ethtool_ringparam *ering)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) struct pcnet32_private *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) ering->tx_max_pending = TX_MAX_RING_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) ering->tx_pending = lp->tx_ring_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) ering->rx_max_pending = RX_MAX_RING_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) ering->rx_pending = lp->rx_ring_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872)
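/*
 * The hardware only supports power-of-two ring sizes, so the requested
 * counts are rounded up to the next power of two with a minimum of 4
 * (see the loops below); a request for 100 TX descriptors, for example,
 * yields a 128-entry ring.  Resizing stops the chip and reallocates the
 * rings, so it is done with the interface quiesced and the lock held.
 */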
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) static int pcnet32_set_ringparam(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) struct ethtool_ringparam *ering)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) struct pcnet32_private *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) unsigned int size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) ulong ioaddr = dev->base_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) if (ering->rx_mini_pending || ering->rx_jumbo_pending)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) if (netif_running(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) pcnet32_netif_stop(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) spin_lock_irqsave(&lp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890)
	size = min_t(unsigned int, ering->tx_pending, TX_MAX_RING_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) /* set the minimum ring size to 4, to allow the loopback test to work
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) * unchanged.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) for (i = 2; i <= PCNET32_LOG_MAX_TX_BUFFERS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) if (size <= (1 << i))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) if ((1 << i) != lp->tx_ring_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) pcnet32_realloc_tx_ring(dev, lp, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902)
	size = min_t(unsigned int, ering->rx_pending, RX_MAX_RING_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) for (i = 2; i <= PCNET32_LOG_MAX_RX_BUFFERS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) if (size <= (1 << i))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) if ((1 << i) != lp->rx_ring_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) pcnet32_realloc_rx_ring(dev, lp, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) lp->napi.weight = lp->rx_ring_size / 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) if (netif_running(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) pcnet32_netif_start(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) pcnet32_restart(dev, CSR0_NORMAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) spin_unlock_irqrestore(&lp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) netif_info(lp, drv, dev, "Ring Param Settings: RX: %d, TX: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) lp->rx_ring_size, lp->tx_ring_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) static void pcnet32_get_strings(struct net_device *dev, u32 stringset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) u8 *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) memcpy(data, pcnet32_gstrings_test, sizeof(pcnet32_gstrings_test));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) static int pcnet32_get_sset_count(struct net_device *dev, int sset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) switch (sset) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) case ETH_SS_TEST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) return PCNET32_TEST_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941)
static void pcnet32_ethtool_test(struct net_device *dev,
				 struct ethtool_test *test, u64 *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) struct pcnet32_private *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) if (test->flags == ETH_TEST_FL_OFFLINE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) rc = pcnet32_loopback_test(dev, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) netif_printk(lp, hw, KERN_DEBUG, dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) "Loopback test failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) test->flags |= ETH_TEST_FL_FAILED;
		} else {
			netif_printk(lp, hw, KERN_DEBUG, dev,
				     "Loopback test passed\n");
		}
	} else {
		netif_printk(lp, hw, KERN_DEBUG, dev,
			     "No tests to run (specify 'Offline' on ethtool)\n");
	}
}				/* end pcnet32_ethtool_test */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961)
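/*
 * Offline loopback self-test, roughly:
 *   1. stop the chip and force it into 32bit mode (BCR20),
 *   2. build up to four TX frames addressed to ourselves,
 *   3. enable internal loopback (BCR32 bit 1 plus LOOP|INTL in CSR15),
 *   4. start the chip and poll each RX descriptor for up to ~200 ms,
 *   5. compare every received frame against what was sent.
 * Returns 0 on success, 1 on failure; the result also lands in *data1.
 */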
static int pcnet32_loopback_test(struct net_device *dev, u64 *data1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) struct pcnet32_private *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) const struct pcnet32_access *a = lp->a; /* access to registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) ulong ioaddr = dev->base_addr; /* card base I/O address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) struct sk_buff *skb; /* sk buff */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) int x, i; /* counters */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) int numbuffs = 4; /* number of TX/RX buffers and descs */
	u16 status = 0x8300;	/* TX descriptor status: OWN | STP | ENP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) __le16 teststatus; /* test of ring status */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) int rc; /* return code */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) int size; /* size of packets */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) unsigned char *packet; /* source packet data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) static const int data_len = 60; /* length of source packets */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) unsigned long ticks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) rc = 1; /* default to fail */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) if (netif_running(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) pcnet32_netif_stop(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) spin_lock_irqsave(&lp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) numbuffs = min(numbuffs, (int)min(lp->rx_ring_size, lp->tx_ring_size));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) /* Reset the PCNET32 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) lp->a->reset(ioaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) /* switch pcnet32 to 32bit mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) lp->a->write_bcr(ioaddr, 20, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) /* purge & init rings but don't actually restart */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) pcnet32_restart(dev, 0x0000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) /* Initialize Transmit buffers. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) size = data_len + 15;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) for (x = 0; x < numbuffs; x++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) skb = netdev_alloc_skb(dev, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) if (!skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) netif_printk(lp, hw, KERN_DEBUG, dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) "Cannot allocate skb at line: %d!\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) __LINE__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) goto clean_up;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) packet = skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) skb_put(skb, size); /* create space for data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) lp->tx_skbuff[x] = skb;
		lp->tx_ring[x].length = cpu_to_le16(-skb->len); /* BCNT is two's complement */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) lp->tx_ring[x].misc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) /* put DA and SA into the skb */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) for (i = 0; i < 6; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) *packet++ = dev->dev_addr[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) for (i = 0; i < 6; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) *packet++ = dev->dev_addr[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) /* type */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) *packet++ = 0x08;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) *packet++ = 0x06;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) /* packet number */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) *packet++ = x;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) /* fill packet with data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) for (i = 0; i < data_len; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) *packet++ = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) lp->tx_dma_addr[x] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) dma_map_single(&lp->pci_dev->dev, skb->data, skb->len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) if (dma_mapping_error(&lp->pci_dev->dev, lp->tx_dma_addr[x])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) netif_printk(lp, hw, KERN_DEBUG, dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) "DMA mapping error at line: %d!\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) __LINE__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) goto clean_up;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) lp->tx_ring[x].base = cpu_to_le32(lp->tx_dma_addr[x]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) wmb(); /* Make sure owner changes after all others are visible */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) lp->tx_ring[x].status = cpu_to_le16(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) }
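	/*
	 * Each test frame built above is 75 bytes: destination and source
	 * both set to our own MAC address, the two type bytes 0x08 0x06
	 * (which happens to be the ARP ethertype), one packet-number byte,
	 * and a 60-byte counting pattern (00 01 02 ...) for the compare.
	 */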
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) x = a->read_bcr(ioaddr, 32); /* set internal loopback in BCR32 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) a->write_bcr(ioaddr, 32, x | 0x0002);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047)
	/* set internal loopback in CSR15: LOOP (bit 2) | INTL (bit 6) */
	x = a->read_csr(ioaddr, CSR15) & 0xfffc;
	lp->a->write_csr(ioaddr, CSR15, x | 0x0044);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051)
	teststatus = cpu_to_le16(0x8000);	/* OWN: descriptor still held by the chip */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) lp->a->write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) /* Check status of descriptors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) for (x = 0; x < numbuffs; x++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) ticks = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) rmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) while ((lp->rx_ring[x].status & teststatus) && (ticks < 200)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) spin_unlock_irqrestore(&lp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) msleep(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) spin_lock_irqsave(&lp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) rmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) ticks++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) if (ticks == 200) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) netif_err(lp, hw, dev, "Desc %d failed to reset!\n", x);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) netdev_printk(KERN_DEBUG, dev, "RX loopback packets:\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) for (x = 0; x < numbuffs; x++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) netdev_printk(KERN_DEBUG, dev, "Packet %d: ", x);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) skb = lp->rx_skbuff[x];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) for (i = 0; i < size; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) pr_cont(" %02x", *(skb->data + i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) pr_cont("\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) x = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) while (x < numbuffs && !rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) skb = lp->rx_skbuff[x];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) packet = lp->tx_skbuff[x]->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) for (i = 0; i < size; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) if (*(skb->data + i) != packet[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) netif_printk(lp, hw, KERN_DEBUG, dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) "Error in compare! %2x - %02x %02x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) i, *(skb->data + i), packet[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) rc = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) x++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) clean_up:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) *data1 = rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) pcnet32_purge_tx_ring(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) x = a->read_csr(ioaddr, CSR15);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) a->write_csr(ioaddr, CSR15, (x & ~0x0044)); /* reset bits 6 and 2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) x = a->read_bcr(ioaddr, 32); /* reset internal loopback */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) a->write_bcr(ioaddr, 32, (x & ~0x0002));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) if (netif_running(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) pcnet32_netif_start(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) pcnet32_restart(dev, CSR0_NORMAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) pcnet32_purge_rx_ring(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) lp->a->write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) spin_unlock_irqrestore(&lp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) } /* end pcnet32_loopback_test */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124)
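/*
 * "ethtool -p" (identify) support: blink the adapter LEDs by toggling
 * bit 14 in each of the LED control registers BCR4-BCR7, then restore
 * the saved values (bit 14 is believed to be the LED output/invert bit;
 * check the datasheet for the exact name).  Returning 2 from
 * ETHTOOL_ID_ACTIVE asks the ethtool core to call us twice per second,
 * e.g. "ethtool -p eth0 5" blinks for five seconds.
 */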
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) static int pcnet32_set_phys_id(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) enum ethtool_phys_id_state state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) struct pcnet32_private *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) const struct pcnet32_access *a = lp->a;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) ulong ioaddr = dev->base_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) switch (state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) case ETHTOOL_ID_ACTIVE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) /* Save the current value of the bcrs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) spin_lock_irqsave(&lp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) for (i = 4; i < 8; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) lp->save_regs[i - 4] = a->read_bcr(ioaddr, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) spin_unlock_irqrestore(&lp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) return 2; /* cycle on/off twice per second */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) case ETHTOOL_ID_ON:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) case ETHTOOL_ID_OFF:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) /* Blink the led */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) spin_lock_irqsave(&lp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) for (i = 4; i < 8; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) a->write_bcr(ioaddr, i, a->read_bcr(ioaddr, i) ^ 0x4000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) spin_unlock_irqrestore(&lp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) case ETHTOOL_ID_INACTIVE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) /* Restore the original value of the bcrs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) spin_lock_irqsave(&lp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) for (i = 4; i < 8; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) a->write_bcr(ioaddr, i, lp->save_regs[i - 4]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) spin_unlock_irqrestore(&lp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) * process one receive descriptor entry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165)
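/*
 * RMD1 status layout after the arithmetic ">> 8" below (bit names per
 * the PCnet datasheet): 0x01 ENP, 0x02 STP, 0x04 BUFF, 0x08 CRC,
 * 0x10 OFLO, 0x20 FRAM, 0x40 ERR; a negative value means OWN is still
 * set.  A clean single-buffer frame therefore reads exactly 0x03
 * (STP | ENP), which is what the error check below relies on.
 */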
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) static void pcnet32_rx_entry(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) struct pcnet32_private *lp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) struct pcnet32_rx_head *rxp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) int entry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) int status = (short)le16_to_cpu(rxp->status) >> 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) int rx_in_place = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) short pkt_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) if (status != 0x03) { /* There was an error. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) * There is a tricky error noted by John Murphy,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) * <murf@perftech.com> to Russ Nelson: Even with full-sized
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) * buffers it's possible for a jabber packet to use two
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) * buffers, with only the last correctly noting the error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) if (status & 0x01) /* Only count a general error at the */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) dev->stats.rx_errors++; /* end of a packet. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) if (status & 0x20)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) dev->stats.rx_frame_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) if (status & 0x10)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) dev->stats.rx_over_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) if (status & 0x08)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) dev->stats.rx_crc_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) if (status & 0x04)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) dev->stats.rx_fifo_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195)
	pkt_len = (le32_to_cpu(rxp->msg_length) & 0xfff) - 4;	/* strip the 4-byte FCS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) /* Discard oversize frames. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) if (unlikely(pkt_len > PKT_BUF_SIZE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) netif_err(lp, drv, dev, "Impossible packet size %d!\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) pkt_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) dev->stats.rx_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) if (pkt_len < 60) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) netif_err(lp, rx_err, dev, "Runt packet!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) dev->stats.rx_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210)
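	/*
	 * rx_copybreak strategy: for large frames, hand the already
	 * mapped ring buffer straight up the stack and map a freshly
	 * allocated full-size skb in its place; for small frames it is
	 * cheaper to allocate an exact-size skb and copy, leaving the
	 * ring buffer mapped.  On any allocation or mapping failure the
	 * frame is dropped and the old buffer reused.
	 */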
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) if (pkt_len > rx_copybreak) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) struct sk_buff *newskb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) dma_addr_t new_dma_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) newskb = netdev_alloc_skb(dev, PKT_BUF_SKB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) * map the new buffer, if mapping fails, drop the packet and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) * reuse the old buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) if (newskb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) skb_reserve(newskb, NET_IP_ALIGN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) new_dma_addr = dma_map_single(&lp->pci_dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) newskb->data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) PKT_BUF_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) if (dma_mapping_error(&lp->pci_dev->dev, new_dma_addr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) netif_err(lp, rx_err, dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) "DMA mapping error.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) dev_kfree_skb(newskb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) skb = lp->rx_skbuff[entry];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) dma_unmap_single(&lp->pci_dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) lp->rx_dma_addr[entry],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) PKT_BUF_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) skb_put(skb, pkt_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) lp->rx_skbuff[entry] = newskb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) lp->rx_dma_addr[entry] = new_dma_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) rxp->base = cpu_to_le32(new_dma_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) rx_in_place = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) }
		} else {
			skb = NULL;
		}
	} else {
		skb = netdev_alloc_skb(dev, pkt_len + NET_IP_ALIGN);
	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) if (skb == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) dev->stats.rx_dropped++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) if (!rx_in_place) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) skb_reserve(skb, NET_IP_ALIGN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) skb_put(skb, pkt_len); /* Make room */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) dma_sync_single_for_cpu(&lp->pci_dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) lp->rx_dma_addr[entry], pkt_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) skb_copy_to_linear_data(skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) (unsigned char *)(lp->rx_skbuff[entry]->data),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) pkt_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) dma_sync_single_for_device(&lp->pci_dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) lp->rx_dma_addr[entry], pkt_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) dev->stats.rx_bytes += skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) skb->protocol = eth_type_trans(skb, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) netif_receive_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) dev->stats.rx_packets++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) static int pcnet32_rx(struct net_device *dev, int budget)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) struct pcnet32_private *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) int entry = lp->cur_rx & lp->rx_mod_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) struct pcnet32_rx_head *rxp = &lp->rx_ring[entry];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) int npackets = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) /* If we own the next entry, it's a new packet. Send it up. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) while (npackets < budget && (short)le16_to_cpu(rxp->status) >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) pcnet32_rx_entry(dev, lp, rxp, entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) npackets += 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) * The docs say that the buffer length isn't touched, but Andrew
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) * Boyd of QNX reports that some revs of the 79C965 clear it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) rxp->buf_length = cpu_to_le16(NEG_BUF_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) wmb(); /* Make sure owner changes after others are visible */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) rxp->status = cpu_to_le16(0x8000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) entry = (++lp->cur_rx) & lp->rx_mod_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) rxp = &lp->rx_ring[entry];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) return npackets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295)
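/*
 * Reap completed TX descriptors between dirty_tx and cur_tx, stopping
 * at the first one the chip still owns.  The error bits tested below
 * live in the misc word (TMD2): RTRY 0x04000000, LCAR 0x08000000,
 * LCOL 0x10000000 and UFLO 0x40000000, per the datasheet naming; UFLO
 * shuts the transmitter down and forces a restart unless the chip can
 * recover by itself (DXSUFLO).  dirty_tx and cur_tx run modulo twice
 * the ring size so that a completely full ring can be told apart from
 * an empty one.
 */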
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) static int pcnet32_tx(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) struct pcnet32_private *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) unsigned int dirty_tx = lp->dirty_tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) int delta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) int must_restart = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) while (dirty_tx != lp->cur_tx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) int entry = dirty_tx & lp->tx_mod_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) int status = (short)le16_to_cpu(lp->tx_ring[entry].status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) if (status < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) break; /* It still hasn't been Txed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) lp->tx_ring[entry].base = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) if (status & 0x4000) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) /* There was a major error, log it. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) int err_status = le32_to_cpu(lp->tx_ring[entry].misc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) dev->stats.tx_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) netif_err(lp, tx_err, dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) "Tx error status=%04x err_status=%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) status, err_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) if (err_status & 0x04000000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) dev->stats.tx_aborted_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) if (err_status & 0x08000000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) dev->stats.tx_carrier_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) if (err_status & 0x10000000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) dev->stats.tx_window_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) #ifndef DO_DXSUFLO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) if (err_status & 0x40000000) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) dev->stats.tx_fifo_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) /* Ackk! On FIFO errors the Tx unit is turned off! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) /* Remove this verbosity later! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) netif_err(lp, tx_err, dev, "Tx FIFO error!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) must_restart = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) if (err_status & 0x40000000) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) dev->stats.tx_fifo_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) if (!lp->dxsuflo) { /* If controller doesn't recover ... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) /* Ackk! On FIFO errors the Tx unit is turned off! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) /* Remove this verbosity later! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) netif_err(lp, tx_err, dev, "Tx FIFO error!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) must_restart = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) if (status & 0x1800)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) dev->stats.collisions++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) dev->stats.tx_packets++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) /* We must free the original skb */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) if (lp->tx_skbuff[entry]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) dma_unmap_single(&lp->pci_dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) lp->tx_dma_addr[entry],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) lp->tx_skbuff[entry]->len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) dev_kfree_skb_any(lp->tx_skbuff[entry]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) lp->tx_skbuff[entry] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) lp->tx_dma_addr[entry] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) dirty_tx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) delta = (lp->cur_tx - dirty_tx) & (lp->tx_mod_mask + lp->tx_ring_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) if (delta > lp->tx_ring_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) netif_err(lp, drv, dev, "out-of-sync dirty pointer, %d vs. %d, full=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) dirty_tx, lp->cur_tx, lp->tx_full);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) dirty_tx += lp->tx_ring_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) delta -= lp->tx_ring_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) if (lp->tx_full &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) netif_queue_stopped(dev) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) delta < lp->tx_ring_size - 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) /* The ring is no longer full, clear tbusy. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) lp->tx_full = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) netif_wake_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) lp->dirty_tx = dirty_tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) return must_restart;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382)
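/*
 * NAPI poll: receive up to "budget" packets without the lock, then reap
 * TX under the lock, restarting the chip if pcnet32_tx() reported a
 * fatal error.  Once work drops below the budget, napi_complete_done()
 * and re-arm interrupts by clearing the mask bits in the high byte of
 * CSR3 and setting IENA (CSR0_INTEN).
 */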
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) static int pcnet32_poll(struct napi_struct *napi, int budget)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) struct pcnet32_private *lp = container_of(napi, struct pcnet32_private, napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) struct net_device *dev = lp->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) unsigned long ioaddr = dev->base_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) int work_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) u16 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) work_done = pcnet32_rx(dev, budget);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) spin_lock_irqsave(&lp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) if (pcnet32_tx(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) /* reset the chip to clear the error condition, then restart */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) lp->a->reset(ioaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) pcnet32_restart(dev, CSR0_START);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) netif_wake_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) if (work_done < budget && napi_complete_done(napi, work_done)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) /* clear interrupt masks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) val = lp->a->read_csr(ioaddr, CSR3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) val &= 0x00ff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) lp->a->write_csr(ioaddr, CSR3, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) /* Set interrupt enable. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) spin_unlock_irqrestore(&lp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) return work_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) #define PCNET32_REGS_PER_PHY 32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) #define PCNET32_MAX_PHYS 32
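
/*
 * Register dump layout in 16-bit words: 8 words of address PROM,
 * CSR0-CSR89, CSR112 and CSR114, BCR0-BCR29, a zero placeholder for
 * BCR30 (reading it can hang a 79C976), BCR31-BCR35, and then 32
 * registers per detected MII PHY.  The fixed part works out to
 * 8 + 90 + 2 + 30 + 1 + 5 = 136 words, presumably the PCNET32_NUM_REGS
 * value used below.
 */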
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) static int pcnet32_get_regs_len(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) struct pcnet32_private *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) int j = lp->phycount * PCNET32_REGS_PER_PHY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) return (PCNET32_NUM_REGS + j) * sizeof(u16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) void *ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) int i, csr0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) u16 *buff = ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) struct pcnet32_private *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) const struct pcnet32_access *a = lp->a;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) ulong ioaddr = dev->base_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) spin_lock_irqsave(&lp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) csr0 = a->read_csr(ioaddr, CSR0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) if (!(csr0 & CSR0_STOP)) /* If not stopped */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) pcnet32_suspend(dev, &flags, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) /* read address PROM */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) for (i = 0; i < 16; i += 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) *buff++ = inw(ioaddr + i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) /* read control and status registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) for (i = 0; i < 90; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) *buff++ = a->read_csr(ioaddr, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) *buff++ = a->read_csr(ioaddr, 112);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) *buff++ = a->read_csr(ioaddr, 114);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) /* read bus configuration registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) for (i = 0; i < 30; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) *buff++ = a->read_bcr(ioaddr, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) *buff++ = 0; /* skip bcr30 so as not to hang 79C976 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) for (i = 31; i < 36; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) *buff++ = a->read_bcr(ioaddr, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) /* read mii phy registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) if (lp->mii) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) int j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) for (j = 0; j < PCNET32_MAX_PHYS; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) if (lp->phymask & (1 << j)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) for (i = 0; i < PCNET32_REGS_PER_PHY; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) lp->a->write_bcr(ioaddr, 33,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) (j << 5) | i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) *buff++ = lp->a->read_bcr(ioaddr, 34);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) if (!(csr0 & CSR0_STOP)) /* If not stopped */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) pcnet32_clr_suspend(lp, ioaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) spin_unlock_irqrestore(&lp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) static const struct ethtool_ops pcnet32_ethtool_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) .get_drvinfo = pcnet32_get_drvinfo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) .get_msglevel = pcnet32_get_msglevel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) .set_msglevel = pcnet32_set_msglevel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) .nway_reset = pcnet32_nway_reset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) .get_link = pcnet32_get_link,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) .get_ringparam = pcnet32_get_ringparam,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) .set_ringparam = pcnet32_set_ringparam,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) .get_strings = pcnet32_get_strings,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) .self_test = pcnet32_ethtool_test,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) .set_phys_id = pcnet32_set_phys_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) .get_regs_len = pcnet32_get_regs_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) .get_regs = pcnet32_get_regs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) .get_sset_count = pcnet32_get_sset_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) .get_link_ksettings = pcnet32_get_link_ksettings,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) .set_link_ksettings = pcnet32_set_link_ksettings,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500)
/* Probes only for non-PCI (VLB) devices; PCI devices are handled by
 * pci_register_driver via pcnet32_probe_pci.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) static void pcnet32_probe_vlbus(unsigned int *pcnet32_portlist)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) unsigned int *port, ioaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) /* search for PCnet32 VLB cards at known addresses */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) for (port = pcnet32_portlist; (ioaddr = *port); port++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) if (request_region
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) (ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_vlbus")) {
			/* check if there is really a pcnet chip on that
			 * ioaddr: AMD boards carry a 0x57,0x57 ("WW")
			 * signature in bytes 14 and 15 of the address PROM
			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) if ((inb(ioaddr + 14) == 0x57) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) (inb(ioaddr + 15) == 0x57)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) pcnet32_probe1(ioaddr, 0, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) release_region(ioaddr, PCNET32_TOTAL_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522)
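/*
 * PCI probe: enable the device and bus mastering, make sure BAR0
 * decodes an I/O range, restrict DMA to 32 bits (the chip is a 32bit
 * bus master), reserve the I/O region and hand off to pcnet32_probe1().
 * Every failure after pci_enable_device() must disable the device again
 * on the way out.
 */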
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) pcnet32_probe_pci(struct pci_dev *pdev, const struct pci_device_id *ent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) unsigned long ioaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) err = pci_enable_device(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) if (pcnet32_debug & NETIF_MSG_PROBE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) pr_err("failed to enable device -- err=%d\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) pci_set_master(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) if (!pci_resource_len(pdev, 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) if (pcnet32_debug & NETIF_MSG_PROBE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) pr_err("card has no PCI IO resources, aborting\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) err = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) goto err_disable_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) err = dma_set_mask(&pdev->dev, PCNET32_DMA_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) if (pcnet32_debug & NETIF_MSG_PROBE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) pr_err("architecture does not support 32bit PCI busmaster DMA\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) goto err_disable_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) ioaddr = pci_resource_start(pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) if (!request_region(ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_pci")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) if (pcnet32_debug & NETIF_MSG_PROBE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) pr_err("io address range already allocated\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) err = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) goto err_disable_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) err = pcnet32_probe1(ioaddr, 1, pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) err_disable_dev:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) pci_disable_device(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) static const struct net_device_ops pcnet32_netdev_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) .ndo_open = pcnet32_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) .ndo_stop = pcnet32_close,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) .ndo_start_xmit = pcnet32_start_xmit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) .ndo_tx_timeout = pcnet32_tx_timeout,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) .ndo_get_stats = pcnet32_get_stats,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) .ndo_set_rx_mode = pcnet32_set_multicast_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) .ndo_do_ioctl = pcnet32_ioctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) .ndo_set_mac_address = eth_mac_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) .ndo_validate_addr = eth_validate_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) #ifdef CONFIG_NET_POLL_CONTROLLER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) .ndo_poll_controller = pcnet32_poll_controller,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) /* pcnet32_probe1
 * Called from both pcnet32_probe_vlbus and pcnet32_probe_pci.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) * pdev will be NULL when called from pcnet32_probe_vlbus.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) struct pcnet32_private *lp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) int i, media;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) int fdx, mii, fset, dxsuflo, sram;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) int chip_version;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) char *chipname;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) struct net_device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) const struct pcnet32_access *a = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) u8 promaddr[ETH_ALEN];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) int ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) /* reset the chip */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) pcnet32_wio_reset(ioaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602)
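	/*
	 * After a reset, CSR0 is expected to read 0x0004 (the STOP bit), so
	 * each access-width probe below checks for that value: first through
	 * the 16-bit (word I/O) register window, then through the 32-bit
	 * (dword I/O) one.
	 */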
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) /* NOTE: 16-bit check is first, otherwise some older PCnet chips fail */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) if (pcnet32_wio_read_csr(ioaddr, 0) == 4 && pcnet32_wio_check(ioaddr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) a = &pcnet32_wio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) pcnet32_dwio_reset(ioaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) if (pcnet32_dwio_read_csr(ioaddr, 0) == 4 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) pcnet32_dwio_check(ioaddr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) a = &pcnet32_dwio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) if (pcnet32_debug & NETIF_MSG_PROBE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) pr_err("No access methods\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) goto err_release_region;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617)
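	/*
	 * The 32-bit chip ID is assembled from two CSRs: CSR88 supplies the
	 * low 16 bits and CSR89 the high 16 bits.  The low 12 bits are a
	 * fixed version field that must read 0x003 on a PCnet part; bits
	 * 12-27 carry the part number (e.g. 0x2621) that the switch below
	 * matches on.
	 */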
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) chip_version =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) a->read_csr(ioaddr, 88) | (a->read_csr(ioaddr, 89) << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) if ((pcnet32_debug & NETIF_MSG_PROBE) && (pcnet32_debug & NETIF_MSG_HW))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) pr_info(" PCnet chip version is %#x\n", chip_version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) if ((chip_version & 0xfff) != 0x003) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) if (pcnet32_debug & NETIF_MSG_PROBE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) pr_info("Unsupported chip version\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) goto err_release_region;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) /* initialize variables */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) fdx = mii = fset = dxsuflo = sram = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) chip_version = (chip_version >> 12) & 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) switch (chip_version) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) case 0x2420:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) chipname = "PCnet/PCI 79C970"; /* PCI */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) case 0x2430:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) if (shared)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) chipname = "PCnet/PCI 79C970"; /* 970 gives the wrong chip id back */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) chipname = "PCnet/32 79C965"; /* 486/VL bus */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) case 0x2621:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) chipname = "PCnet/PCI II 79C970A"; /* PCI */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) fdx = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) case 0x2623:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) chipname = "PCnet/FAST 79C971"; /* PCI */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) fdx = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) mii = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) fset = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) case 0x2624:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) chipname = "PCnet/FAST+ 79C972"; /* PCI */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) fdx = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) mii = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) fset = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) case 0x2625:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) chipname = "PCnet/FAST III 79C973"; /* PCI */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) fdx = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) mii = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) sram = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) case 0x2626:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) chipname = "PCnet/Home 79C978"; /* PCI */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) fdx = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) * This is based on specs published at www.amd.com. This section
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) * assumes that a card with a 79C978 wants to go into standard
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) * ethernet mode. The 79C978 can also go into 1Mb HomePNA mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) * and the module option homepna=1 can select this instead.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) media = a->read_bcr(ioaddr, 49);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) media &= ~3; /* default to 10Mb ethernet */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) if (cards_found < MAX_UNITS && homepna[cards_found])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) media |= 1; /* switch to home wiring mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) if (pcnet32_debug & NETIF_MSG_PROBE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) printk(KERN_DEBUG PFX "media set to %sMbit mode\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) (media & 1) ? "1" : "10");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) a->write_bcr(ioaddr, 49, media);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) case 0x2627:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) chipname = "PCnet/FAST III 79C975"; /* PCI */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) fdx = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) mii = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) sram = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) case 0x2628:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) chipname = "PCnet/PRO 79C976";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) fdx = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) mii = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) if (pcnet32_debug & NETIF_MSG_PROBE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) pr_info("PCnet version %#x, no PCnet32 chip\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) chip_version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) goto err_release_region;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) * On selected chips turn on the BCR18:NOUFLO bit. This stops transmit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) * starting until the packet is loaded. Strike one for reliability, lose
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) * one for latency - although on PCI this isn't a big loss. Older chips
 * have FIFOs smaller than a packet, so you can't do this.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) * Turn on BCR18:BurstRdEn and BCR18:BurstWrEn.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707)
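	/*
	 * Bit decomposition of the masks used below, matching the BCR18 flag
	 * names printed by the probe code further down: 0x0860 sets bit 11
	 * (NoUFlow), bit 6 (BurstRdEn) and bit 5 (BurstWrEn).  The CSR80
	 * write sets the two tx_start_pt bits (10-11) to their maximum
	 * value; note that the mask used there also clears the remaining
	 * CSR80 bits.
	 */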
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) if (fset) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) a->write_bcr(ioaddr, 18, (a->read_bcr(ioaddr, 18) | 0x0860));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) a->write_csr(ioaddr, 80,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) (a->read_csr(ioaddr, 80) & 0x0C00) | 0x0c00);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) dxsuflo = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) * The Am79C973/Am79C975 controllers come with 12K of SRAM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) * which we can use for the Tx/Rx buffers but most importantly,
 * the use of SRAM allows us to use the BCR18:NOUFLO bit to avoid
 * Tx FIFO underflows.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) if (sram) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) /*
		 * The SRAM is configured in two steps. First we
		 * set the SRAM size in the BCR25:SRAM_SIZE bits. According
		 * to the datasheet, each unit corresponds to a 512-byte
		 * page, so we can have at most 24 pages. The SRAM_SIZE
		 * field holds the upper 8 bits of the 16-bit SRAM size.
		 * The low 8 bits start at 0x00 and end at 0xff. So the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) * address range is from 0x0000 up to 0x17ff. Therefore,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) * the SRAM_SIZE is set to 0x17. The next step is to set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) * the BCR26:SRAM_BND midway through so the Tx and Rx
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) * buffers can share the SRAM equally.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) */
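		/*
		 * Concretely: BCR25 = 0x17 puts the top of SRAM at byte
		 * address 0x17ff, and BCR26 = 0xc puts the Tx/Rx boundary at
		 * 0x0c00, i.e. the midpoint.  Both registers hold the upper
		 * 8 bits of a 16-bit byte address, which is why the debug
		 * code below prints them shifted left by 8.
		 */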
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) a->write_bcr(ioaddr, 25, 0x17);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) a->write_bcr(ioaddr, 26, 0xc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) /* And finally enable the NOUFLO bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) a->write_bcr(ioaddr, 18, a->read_bcr(ioaddr, 18) | (1 << 11));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) dev = alloc_etherdev(sizeof(*lp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) if (!dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) goto err_release_region;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) if (pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) SET_NETDEV_DEV(dev, &pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) if (pcnet32_debug & NETIF_MSG_PROBE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) pr_info("%s at %#3lx,", chipname, ioaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) /* In most chips, after a chip reset, the ethernet address is read from the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) * station address PROM at the base address and programmed into the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) * "Physical Address Registers" CSR12-14.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) * As a precautionary measure, we read the PROM values and complain if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) * they disagree with the CSRs. If they miscompare, and the PROM addr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) * is valid, then the PROM addr is used.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) */
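	/*
	 * Each of CSR12-14 returns one 16-bit word of the station address,
	 * low byte first, so the three reads below yield the full 6-byte MAC.
	 */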
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) for (i = 0; i < 3; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) unsigned int val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) val = a->read_csr(ioaddr, i + 12) & 0x0ffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) /* There may be endianness issues here. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) dev->dev_addr[2 * i] = val & 0x0ff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) dev->dev_addr[2 * i + 1] = (val >> 8) & 0x0ff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) /* read PROM address and compare with CSR address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) for (i = 0; i < ETH_ALEN; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) promaddr[i] = inb(ioaddr + i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) if (!ether_addr_equal(promaddr, dev->dev_addr) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) !is_valid_ether_addr(dev->dev_addr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) if (is_valid_ether_addr(promaddr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) if (pcnet32_debug & NETIF_MSG_PROBE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) pr_cont(" warning: CSR address invalid,\n");
				pr_info(" using PROM address instead:");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) memcpy(dev->dev_addr, promaddr, ETH_ALEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) /* if the ethernet address is not valid, force to 00:00:00:00:00:00 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) if (!is_valid_ether_addr(dev->dev_addr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) eth_zero_addr(dev->dev_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) if (pcnet32_debug & NETIF_MSG_PROBE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) pr_cont(" %pM", dev->dev_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788)
		/* Versions 0x2623 and 0x2624 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) if (((chip_version + 1) & 0xfffe) == 0x2624) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) i = a->read_csr(ioaddr, 80) & 0x0C00; /* Check tx_start_pt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) pr_info(" tx_start_pt(0x%04x):", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) switch (i >> 10) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) pr_cont(" 20 bytes,");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) pr_cont(" 64 bytes,");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) pr_cont(" 128 bytes,");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) case 3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) pr_cont("~220 bytes,");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) i = a->read_bcr(ioaddr, 18); /* Check Burst/Bus control */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) pr_cont(" BCR18(%x):", i & 0xffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) if (i & (1 << 5))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) pr_cont("BurstWrEn ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) if (i & (1 << 6))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) pr_cont("BurstRdEn ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) if (i & (1 << 7))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) pr_cont("DWordIO ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) if (i & (1 << 11))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) pr_cont("NoUFlow ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) i = a->read_bcr(ioaddr, 25);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) pr_info(" SRAMSIZE=0x%04x,", i << 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) i = a->read_bcr(ioaddr, 26);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) pr_cont(" SRAM_BND=0x%04x,", i << 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) i = a->read_bcr(ioaddr, 27);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) if (i & (1 << 14))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) pr_cont("LowLatRx");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) dev->base_addr = ioaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) lp = netdev_priv(dev);
	/* dma_alloc_coherent returns page-aligned memory, so we do not
	 * have to check the alignment.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) lp->init_block = dma_alloc_coherent(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) sizeof(*lp->init_block),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) &lp->init_dma_addr, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) if (!lp->init_block) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) if (pcnet32_debug & NETIF_MSG_PROBE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) pr_err("Coherent memory allocation failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) goto err_free_netdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) lp->pci_dev = pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) lp->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) spin_lock_init(&lp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) lp->name = chipname;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) lp->shared_irq = shared;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) lp->tx_ring_size = TX_RING_SIZE; /* default tx ring size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) lp->rx_ring_size = RX_RING_SIZE; /* default rx ring size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) lp->tx_mod_mask = lp->tx_ring_size - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) lp->rx_mod_mask = lp->rx_ring_size - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) lp->tx_len_bits = (PCNET32_LOG_TX_BUFFERS << 12);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) lp->rx_len_bits = (PCNET32_LOG_RX_BUFFERS << 4);
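	/*
	 * tx_len_bits/rx_len_bits pre-shift the log2 ring sizes into the
	 * TLEN and RLEN positions of the init block's tlen_rlen word
	 * (<< 12 and << 4); the two are OR-ed together when the init block
	 * is filled in below.
	 */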
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) lp->mii_if.full_duplex = fdx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) lp->mii_if.phy_id_mask = 0x1f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) lp->mii_if.reg_num_mask = 0x1f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) lp->dxsuflo = dxsuflo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) lp->mii = mii;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) lp->chip_version = chip_version;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) lp->msg_enable = pcnet32_debug;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) if ((cards_found >= MAX_UNITS) ||
	    (options[cards_found] >= ARRAY_SIZE(options_mapping)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) lp->options = PCNET32_PORT_ASEL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) lp->options = options_mapping[options[cards_found]];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) /* force default port to TP on 79C970A so link detection can work */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) if (lp->chip_version == PCNET32_79C970A)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) lp->options = PCNET32_PORT_10BT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) lp->mii_if.dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) lp->mii_if.mdio_read = mdio_read;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) lp->mii_if.mdio_write = mdio_write;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) /* napi.weight is used in both the napi and non-napi cases */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) lp->napi.weight = lp->rx_ring_size / 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) netif_napi_add(dev, &lp->napi, pcnet32_poll, lp->rx_ring_size / 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) if (fdx && !(lp->options & PCNET32_PORT_ASEL) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) ((cards_found >= MAX_UNITS) || full_duplex[cards_found]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) lp->options |= PCNET32_PORT_FD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) lp->a = a;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) /* prior to register_netdev, dev->name is not yet correct */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) goto err_free_ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) /* detect special T1/E1 WAN card by checking for MAC address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) if (dev->dev_addr[0] == 0x00 && dev->dev_addr[1] == 0xe0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) dev->dev_addr[2] == 0x75)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) lp->options = PCNET32_PORT_FD | PCNET32_PORT_GPSI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) lp->init_block->mode = cpu_to_le16(0x0003); /* Disable Rx and Tx. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) lp->init_block->tlen_rlen =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) cpu_to_le16(lp->tx_len_bits | lp->rx_len_bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) for (i = 0; i < 6; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) lp->init_block->phys_addr[i] = dev->dev_addr[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) lp->init_block->filter[0] = 0x00000000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) lp->init_block->filter[1] = 0x00000000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) lp->init_block->rx_ring = cpu_to_le32(lp->rx_ring_dma_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) lp->init_block->tx_ring = cpu_to_le32(lp->tx_ring_dma_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) /* switch pcnet32 to 32bit mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) a->write_bcr(ioaddr, 20, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) a->write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
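	/*
	 * The chip fetches the init block by bus-master DMA: CSR1 holds the
	 * low 16 bits of its bus address and CSR2 the high 16 bits.  The
	 * same sequence is repeated in pcnet32_open() before restarting the
	 * chip.
	 */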
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) if (pdev) { /* use the IRQ provided by PCI */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) dev->irq = pdev->irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) if (pcnet32_debug & NETIF_MSG_PROBE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) pr_cont(" assigned IRQ %d\n", dev->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) unsigned long irq_mask = probe_irq_on();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) * To auto-IRQ we enable the initialization-done and DMA error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) * interrupts. For ISA boards we get a DMA error, but VLB and PCI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) * boards will work.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) /* Trigger an initialization just for the interrupt. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) a->write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_INIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) mdelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) dev->irq = probe_irq_off(irq_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) if (!dev->irq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) if (pcnet32_debug & NETIF_MSG_PROBE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) pr_cont(", failed to detect IRQ line\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) goto err_free_ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) if (pcnet32_debug & NETIF_MSG_PROBE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) pr_cont(", probed IRQ %d\n", dev->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935)
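	/*
	 * BCR33 is the MII address register: the PHY address sits in bits
	 * 5-9, which is why the code below shifts by 5 both when reading the
	 * current address and when writing back the address of the PHY it
	 * settles on.
	 */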
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) /* Set the mii phy_id so that we can query the link state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) if (lp->mii) {
		/* lp->phycount and lp->phymask start out zeroed;
		 * alloc_etherdev() cleared the private area above
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) lp->mii_if.phy_id = ((lp->a->read_bcr(ioaddr, 33)) >> 5) & 0x1f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) /* scan for PHYs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) for (i = 0; i < PCNET32_MAX_PHYS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) unsigned short id1, id2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) id1 = mdio_read(dev, i, MII_PHYSID1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) if (id1 == 0xffff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) id2 = mdio_read(dev, i, MII_PHYSID2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) if (id2 == 0xffff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) if (i == 31 && ((chip_version + 1) & 0xfffe) == 0x2624)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) continue; /* 79C971 & 79C972 have phantom phy at id 31 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) lp->phycount++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) lp->phymask |= (1 << i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) lp->mii_if.phy_id = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) if (pcnet32_debug & NETIF_MSG_PROBE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) pr_info("Found PHY %04x:%04x at address %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) id1, id2, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) lp->a->write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) if (lp->phycount > 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) lp->options |= PCNET32_PORT_MII;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) timer_setup(&lp->watchdog_timer, pcnet32_watchdog, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) /* The PCNET32-specific entries in the device structure. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) dev->netdev_ops = &pcnet32_netdev_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) dev->ethtool_ops = &pcnet32_ethtool_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) dev->watchdog_timeo = (5 * HZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) /* Fill in the generic fields of the device structure. */
	ret = register_netdev(dev);
	if (ret)
		goto err_free_ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) if (pdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) pci_set_drvdata(pdev, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) lp->next = pcnet32_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) pcnet32_dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) if (pcnet32_debug & NETIF_MSG_PROBE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) pr_info("%s: registered as %s\n", dev->name, lp->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) cards_found++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) /* enable LED writes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) a->write_bcr(ioaddr, 2, a->read_bcr(ioaddr, 2) | 0x1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) err_free_ring:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) pcnet32_free_ring(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) dma_free_coherent(&lp->pci_dev->dev, sizeof(*lp->init_block),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) lp->init_block, lp->init_dma_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) err_free_netdev:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) free_netdev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) err_release_region:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) release_region(ioaddr, PCNET32_TOTAL_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) /* if any allocation fails, caller must also call pcnet32_free_ring */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) static int pcnet32_alloc_ring(struct net_device *dev, const char *name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) struct pcnet32_private *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) lp->tx_ring = dma_alloc_coherent(&lp->pci_dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) sizeof(struct pcnet32_tx_head) * lp->tx_ring_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) &lp->tx_ring_dma_addr, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) if (lp->tx_ring == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) netif_err(lp, drv, dev, "Coherent memory allocation failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) lp->rx_ring = dma_alloc_coherent(&lp->pci_dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) sizeof(struct pcnet32_rx_head) * lp->rx_ring_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) &lp->rx_ring_dma_addr, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) if (lp->rx_ring == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) netif_err(lp, drv, dev, "Coherent memory allocation failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) lp->tx_dma_addr = kcalloc(lp->tx_ring_size, sizeof(dma_addr_t),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) if (!lp->tx_dma_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) lp->rx_dma_addr = kcalloc(lp->rx_ring_size, sizeof(dma_addr_t),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) if (!lp->rx_dma_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) lp->tx_skbuff = kcalloc(lp->tx_ring_size, sizeof(struct sk_buff *),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) if (!lp->tx_skbuff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) lp->rx_skbuff = kcalloc(lp->rx_ring_size, sizeof(struct sk_buff *),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) if (!lp->rx_skbuff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) static void pcnet32_free_ring(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) struct pcnet32_private *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) kfree(lp->tx_skbuff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) lp->tx_skbuff = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) kfree(lp->rx_skbuff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) lp->rx_skbuff = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) kfree(lp->tx_dma_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) lp->tx_dma_addr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) kfree(lp->rx_dma_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) lp->rx_dma_addr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) if (lp->tx_ring) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) dma_free_coherent(&lp->pci_dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) sizeof(struct pcnet32_tx_head) * lp->tx_ring_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) lp->tx_ring, lp->tx_ring_dma_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) lp->tx_ring = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) if (lp->rx_ring) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) dma_free_coherent(&lp->pci_dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) sizeof(struct pcnet32_rx_head) * lp->rx_ring_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) lp->rx_ring, lp->rx_ring_dma_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) lp->rx_ring = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) static int pcnet32_open(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) struct pcnet32_private *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) struct pci_dev *pdev = lp->pci_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) unsigned long ioaddr = dev->base_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) u16 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) if (request_irq(dev->irq, pcnet32_interrupt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) lp->shared_irq ? IRQF_SHARED : 0, dev->name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) (void *)dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) }
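	/* lp->shared_irq was set from the bus type at probe time; PCI
	 * interrupt lines may be shared with other devices, hence the
	 * IRQF_SHARED flag above.
	 */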
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) spin_lock_irqsave(&lp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) /* Check for a valid station address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) if (!is_valid_ether_addr(dev->dev_addr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) rc = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) goto err_free_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) /* Reset the PCNET32 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) lp->a->reset(ioaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) /* switch pcnet32 to 32bit mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) lp->a->write_bcr(ioaddr, 20, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) netif_printk(lp, ifup, KERN_DEBUG, dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) "%s() irq %d tx/rx rings %#x/%#x init %#x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) __func__, dev->irq, (u32) (lp->tx_ring_dma_addr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) (u32) (lp->rx_ring_dma_addr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) (u32) (lp->init_dma_addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) lp->autoneg = !!(lp->options & PCNET32_PORT_ASEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) lp->port_tp = !!(lp->options & PCNET32_PORT_10BT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) lp->fdx = !!(lp->options & PCNET32_PORT_FD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) /* set/reset autoselect bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) val = lp->a->read_bcr(ioaddr, 2) & ~2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) if (lp->options & PCNET32_PORT_ASEL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) val |= 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) lp->a->write_bcr(ioaddr, 2, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) /* handle full duplex setting */
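	/* In BCR9, bit 0 enables full duplex and bit 1 is set in addition
	 * for full duplex on the AUI port (bit usage as per this function;
	 * see also the xSeries 250 workaround below).
	 */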
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) if (lp->mii_if.full_duplex) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) val = lp->a->read_bcr(ioaddr, 9) & ~3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) if (lp->options & PCNET32_PORT_FD) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) val |= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) val |= 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) } else if (lp->options & PCNET32_PORT_ASEL) {
			/* workaround for the xSeries 250; turn on for 79C975 only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) if (lp->chip_version == 0x2627)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) val |= 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) lp->a->write_bcr(ioaddr, 9, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) /* set/reset GPSI bit in test register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) val = lp->a->read_csr(ioaddr, 124) & ~0x10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) val |= 0x10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) lp->a->write_csr(ioaddr, 124, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) if (pdev && pdev->subsystem_vendor == PCI_VENDOR_ID_AT &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) (pdev->subsystem_device == PCI_SUBDEVICE_ID_AT_2700FX ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) pdev->subsystem_device == PCI_SUBDEVICE_ID_AT_2701FX)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) if (lp->options & PCNET32_PORT_ASEL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) lp->options = PCNET32_PORT_FD | PCNET32_PORT_100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) netif_printk(lp, link, KERN_DEBUG, dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) "Setting 100Mb-Full Duplex\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) if (lp->phycount < 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) /*
		 * 24 Jun 2004: according to AMD, in order to change the PHY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) * DANAS (or DISPM for 79C976) must be set; then select the speed,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) * duplex, and/or enable auto negotiation, and clear DANAS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) lp->a->write_bcr(ioaddr, 32,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) lp->a->read_bcr(ioaddr, 32) | 0x0080);
			/* disable Auto Negotiation, set 10Mbps, HD */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) val = lp->a->read_bcr(ioaddr, 32) & ~0xb8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) if (lp->options & PCNET32_PORT_FD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) val |= 0x10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) if (lp->options & PCNET32_PORT_100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) val |= 0x08;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) lp->a->write_bcr(ioaddr, 32, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) if (lp->options & PCNET32_PORT_ASEL) {
				lp->a->write_bcr(ioaddr, 32,
						 lp->a->read_bcr(ioaddr, 32) | 0x0080);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) /* enable auto negotiate, setup, disable fd */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) val = lp->a->read_bcr(ioaddr, 32) & ~0x98;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) val |= 0x20;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) lp->a->write_bcr(ioaddr, 32, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) int first_phy = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) u16 bmcr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) u32 bcr9;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) /*
		 * There is really no good way to handle multiple PHYs
		 * other than turning off all the automatic features
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) val = lp->a->read_bcr(ioaddr, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) lp->a->write_bcr(ioaddr, 2, val & ~2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) val = lp->a->read_bcr(ioaddr, 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) lp->a->write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) if (!(lp->options & PCNET32_PORT_ASEL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) /* setup ecmd */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) ecmd.port = PORT_MII;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) ecmd.transceiver = XCVR_INTERNAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) ecmd.autoneg = AUTONEG_DISABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) ethtool_cmd_speed_set(&ecmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) (lp->options & PCNET32_PORT_100) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) SPEED_100 : SPEED_10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) bcr9 = lp->a->read_bcr(ioaddr, 9);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) if (lp->options & PCNET32_PORT_FD) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) ecmd.duplex = DUPLEX_FULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) bcr9 |= (1 << 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) ecmd.duplex = DUPLEX_HALF;
				bcr9 &= ~(1 << 0);	/* clear the FD bit for half duplex */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) lp->a->write_bcr(ioaddr, 9, bcr9);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) for (i = 0; i < PCNET32_MAX_PHYS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) if (lp->phymask & (1 << i)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) /* isolate all but the first PHY */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) bmcr = mdio_read(dev, i, MII_BMCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) if (first_phy == -1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) first_phy = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) mdio_write(dev, i, MII_BMCR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) bmcr & ~BMCR_ISOLATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) mdio_write(dev, i, MII_BMCR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) bmcr | BMCR_ISOLATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) /* use mii_ethtool_sset to setup PHY */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) lp->mii_if.phy_id = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) ecmd.phy_address = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) if (lp->options & PCNET32_PORT_ASEL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) mii_ethtool_gset(&lp->mii_if, &ecmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) ecmd.autoneg = AUTONEG_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) mii_ethtool_sset(&lp->mii_if, &ecmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) lp->mii_if.phy_id = first_phy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) netif_info(lp, link, dev, "Using PHY number %d\n", first_phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241)
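	/* When DO_DXSUFLO is compiled in, this sets bit 6 (0x40) of CSR3 so
	 * that a Tx FIFO underflow no longer stops the transmitter;
	 * lp->dxsuflo was set at probe time for the chips that support it
	 * (see the fset handling in pcnet32_probe1()).
	 */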
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) #ifdef DO_DXSUFLO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) if (lp->dxsuflo) { /* Disable transmit stop on underflow */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) val = lp->a->read_csr(ioaddr, CSR3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) val |= 0x40;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) lp->a->write_csr(ioaddr, CSR3, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249)
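	/* The port-select bits of lp->options are shifted into the PORTSEL
	 * field of the init block's MODE word (hence the << 7); the
	 * multicast filter words are filled in right after.
	 */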
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) lp->init_block->mode =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) pcnet32_load_multicast(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) if (pcnet32_init_ring(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) rc = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) goto err_free_ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) napi_enable(&lp->napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) /* Re-initialize the PCNET32, and start it when done. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) lp->a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) lp->a->write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) netif_start_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) if (lp->chip_version >= PCNET32_79C970A) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) /* Print the link status and start the watchdog */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) pcnet32_check_media(dev, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) mod_timer(&lp->watchdog_timer, PCNET32_WATCHDOG_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275)
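	/* Busy-wait (up to 100 reads) for the chip to finish reading the
	 * init block; it signals completion by setting the IDON bit in CSR0.
	 */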
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) while (i++ < 100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) * reports that doing so triggers a bug in the '974.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) lp->a->write_csr(ioaddr, CSR0, CSR0_NORMAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) netif_printk(lp, ifup, KERN_DEBUG, dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) "pcnet32 open after %d ticks, init block %#x csr0 %4.4x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) i,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) (u32) (lp->init_dma_addr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) lp->a->read_csr(ioaddr, CSR0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) spin_unlock_irqrestore(&lp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) return 0; /* Always succeed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) err_free_ring:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) /* free any allocated skbuffs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) pcnet32_purge_rx_ring(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) * Switch back to 16bit mode to avoid problems with dumb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) * DOS packet driver after a warm reboot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) lp->a->write_bcr(ioaddr, 20, 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) err_free_irq:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) spin_unlock_irqrestore(&lp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) free_irq(dev->irq, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) * The LANCE has been halted for one reason or another (busmaster memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) * arbitration error, Tx FIFO underflow, driver stopped it to reconfigure,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) * etc.). Modern LANCE variants always reload their ring-buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) * configuration when restarted, so we must reinitialize our ring
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) * context before restarting. As part of this reinitialization,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) * find all packets still on the Tx ring and pretend that they had been
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) * sent (in effect, drop the packets on the floor) - the higher-level
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) * protocols will time out and retransmit. It'd be better to shuffle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) * these skbs to a temp list and then actually re-Tx them after
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) * restarting the chip, but I'm too lazy to do so right now. dplatt@3do.com
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) static void pcnet32_purge_tx_ring(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) struct pcnet32_private *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) for (i = 0; i < lp->tx_ring_size; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) lp->tx_ring[i].status = 0; /* CPU owns buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) wmb(); /* Make sure adapter sees owner change */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) if (lp->tx_skbuff[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) if (!dma_mapping_error(&lp->pci_dev->dev, lp->tx_dma_addr[i]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) dma_unmap_single(&lp->pci_dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) lp->tx_dma_addr[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) lp->tx_skbuff[i]->len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) dev_kfree_skb_any(lp->tx_skbuff[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) lp->tx_skbuff[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) lp->tx_dma_addr[i] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) /* Initialize the PCNET32 Rx and Tx rings. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) static int pcnet32_init_ring(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) struct pcnet32_private *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) lp->tx_full = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) lp->cur_rx = lp->cur_tx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) lp->dirty_rx = lp->dirty_tx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) for (i = 0; i < lp->rx_ring_size; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) struct sk_buff *rx_skbuff = lp->rx_skbuff[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) if (rx_skbuff == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) lp->rx_skbuff[i] = netdev_alloc_skb(dev, PKT_BUF_SKB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) rx_skbuff = lp->rx_skbuff[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) if (!rx_skbuff) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) /* there is not much we can do at this point */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) netif_err(lp, drv, dev, "%s netdev_alloc_skb failed\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) skb_reserve(rx_skbuff, NET_IP_ALIGN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) rmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) if (lp->rx_dma_addr[i] == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) lp->rx_dma_addr[i] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) dma_map_single(&lp->pci_dev->dev, rx_skbuff->data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) PKT_BUF_SIZE, DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) if (dma_mapping_error(&lp->pci_dev->dev, lp->rx_dma_addr[i])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) /* there is not much we can do at this point */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) netif_err(lp, drv, dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) "%s pci dma mapping error\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) lp->rx_ring[i].base = cpu_to_le32(lp->rx_dma_addr[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) lp->rx_ring[i].buf_length = cpu_to_le16(NEG_BUF_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) wmb(); /* Make sure owner changes after all others are visible */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) lp->rx_ring[i].status = cpu_to_le16(0x8000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) /* The Tx buffer address is filled in as needed, but we do need to clear
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) * the upper ownership bit. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) for (i = 0; i < lp->tx_ring_size; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) lp->tx_ring[i].status = 0; /* CPU owns buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) wmb(); /* Make sure adapter sees owner change */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) lp->tx_ring[i].base = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) lp->tx_dma_addr[i] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) lp->init_block->tlen_rlen =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) cpu_to_le16(lp->tx_len_bits | lp->rx_len_bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) for (i = 0; i < 6; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) lp->init_block->phys_addr[i] = dev->dev_addr[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) lp->init_block->rx_ring = cpu_to_le32(lp->rx_ring_dma_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) lp->init_block->tx_ring = cpu_to_le32(lp->tx_ring_dma_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) wmb(); /* Make sure all changes are visible */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) /* the pcnet32 has been issued a stop or reset. Wait for the stop bit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) * then flush the pending transmit operations, re-initialize the ring,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) * and tell the chip to initialize.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) struct pcnet32_private *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) unsigned long ioaddr = dev->base_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) /* wait for stop */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) for (i = 0; i < 100; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) if (lp->a->read_csr(ioaddr, CSR0) & CSR0_STOP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) if (i >= 100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) netif_err(lp, drv, dev, "%s timed out waiting for stop\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) pcnet32_purge_tx_ring(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) if (pcnet32_init_ring(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) /* ReInit Ring */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) while (i++ < 1000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) lp->a->write_csr(ioaddr, CSR0, csr0_bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) static void pcnet32_tx_timeout(struct net_device *dev, unsigned int txqueue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) struct pcnet32_private *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) unsigned long ioaddr = dev->base_addr, flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) spin_lock_irqsave(&lp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) /* Transmitter timeout, serious problems. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) if (pcnet32_debug & NETIF_MSG_DRV)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) pr_err("%s: transmit timed out, status %4.4x, resetting\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) dev->name, lp->a->read_csr(ioaddr, CSR0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) dev->stats.tx_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) if (netif_msg_tx_err(lp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) printk(KERN_DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) " Ring data dump: dirty_tx %d cur_tx %d%s cur_rx %d.",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) lp->dirty_tx, lp->cur_tx, lp->tx_full ? " (full)" : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) lp->cur_rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) for (i = 0; i < lp->rx_ring_size; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) printk("%s %08x %04x %08x %04x", i & 1 ? "" : "\n ",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) le32_to_cpu(lp->rx_ring[i].base),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) (-le16_to_cpu(lp->rx_ring[i].buf_length)) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) 0xffff, le32_to_cpu(lp->rx_ring[i].msg_length),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) le16_to_cpu(lp->rx_ring[i].status));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) for (i = 0; i < lp->tx_ring_size; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) printk("%s %08x %04x %08x %04x", i & 1 ? "" : "\n ",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) le32_to_cpu(lp->tx_ring[i].base),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) (-le16_to_cpu(lp->tx_ring[i].length)) & 0xffff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) le32_to_cpu(lp->tx_ring[i].misc),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) le16_to_cpu(lp->tx_ring[i].status));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) printk("\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) pcnet32_restart(dev, CSR0_NORMAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) netif_trans_update(dev); /* prevent tx timeout */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) netif_wake_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) spin_unlock_irqrestore(&lp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) struct pcnet32_private *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) unsigned long ioaddr = dev->base_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) u16 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) int entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) spin_lock_irqsave(&lp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) netif_printk(lp, tx_queued, KERN_DEBUG, dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) "%s() called, csr0 %4.4x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) __func__, lp->a->read_csr(ioaddr, CSR0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) /* Default status -- will not enable Successful-TxDone
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) * interrupt when that option is available to us.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) status = 0x8300;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) /* Fill in a Tx ring entry */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) /* Mask to ring buffer boundary. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) entry = lp->cur_tx & lp->tx_mod_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) /* Caution: the write order is important here, set the status
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) * with the "ownership" bits last. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) lp->tx_ring[entry].length = cpu_to_le16(-skb->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) lp->tx_ring[entry].misc = 0x00000000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) lp->tx_dma_addr[entry] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) dma_map_single(&lp->pci_dev->dev, skb->data, skb->len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) if (dma_mapping_error(&lp->pci_dev->dev, lp->tx_dma_addr[entry])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) dev_kfree_skb_any(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) dev->stats.tx_dropped++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) goto drop_packet;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) lp->tx_skbuff[entry] = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) lp->tx_ring[entry].base = cpu_to_le32(lp->tx_dma_addr[entry]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) wmb(); /* Make sure owner changes after all others are visible */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) lp->tx_ring[entry].status = cpu_to_le16(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) lp->cur_tx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) dev->stats.tx_bytes += skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) /* Trigger an immediate send poll. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) if (lp->tx_ring[(entry + 1) & lp->tx_mod_mask].base != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) lp->tx_full = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) netif_stop_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) drop_packet:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) spin_unlock_irqrestore(&lp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) return NETDEV_TX_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) /* The PCNET32 interrupt handler. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) static irqreturn_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) pcnet32_interrupt(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) struct net_device *dev = dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) struct pcnet32_private *lp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) unsigned long ioaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) u16 csr0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) int boguscnt = max_interrupt_work;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) ioaddr = dev->base_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) spin_lock(&lp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) csr0 = lp->a->read_csr(ioaddr, CSR0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) while ((csr0 & 0x8f00) && --boguscnt >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) if (csr0 == 0xffff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) break; /* PCMCIA remove happened */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) /* Acknowledge all of the current interrupt sources ASAP. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) lp->a->write_csr(ioaddr, CSR0, csr0 & ~0x004f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) netif_printk(lp, intr, KERN_DEBUG, dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) "interrupt csr0=%#2.2x new csr=%#2.2x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) csr0, lp->a->read_csr(ioaddr, CSR0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) /* Log misc errors. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) if (csr0 & 0x4000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) dev->stats.tx_errors++; /* Tx babble. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) if (csr0 & 0x1000) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) * This happens when our receive ring is full. This
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) * shouldn't be a problem as we will see normal rx
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) * interrupts for the frames in the receive ring. But
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) * there are some PCI chipsets (I can reproduce this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) * on SP3G with Intel saturn chipset) which have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) * sometimes problems and will fill up the receive
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) * ring with error descriptors. In this situation we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) * don't get a rx interrupt, but a missed frame
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) * interrupt sooner or later.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) dev->stats.rx_errors++; /* Missed a Rx frame. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) if (csr0 & 0x0800) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) netif_err(lp, drv, dev, "Bus master arbitration failure, status %4.4x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) csr0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) /* unlike for the lance, there is no restart needed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) if (napi_schedule_prep(&lp->napi)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) u16 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) /* set interrupt masks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) val = lp->a->read_csr(ioaddr, CSR3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) val |= 0x5f00;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) lp->a->write_csr(ioaddr, CSR3, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) __napi_schedule(&lp->napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) csr0 = lp->a->read_csr(ioaddr, CSR0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) netif_printk(lp, intr, KERN_DEBUG, dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) "exiting interrupt, csr0=%#4.4x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) lp->a->read_csr(ioaddr, CSR0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) spin_unlock(&lp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) static int pcnet32_close(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) unsigned long ioaddr = dev->base_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) struct pcnet32_private *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) del_timer_sync(&lp->watchdog_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) netif_stop_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) napi_disable(&lp->napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) spin_lock_irqsave(&lp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) netif_printk(lp, ifdown, KERN_DEBUG, dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) "Shutting down ethercard, status was %2.2x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) lp->a->read_csr(ioaddr, CSR0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) /* We stop the PCNET32 here -- it occasionally polls memory if we don't. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) * Switch back to 16bit mode to avoid problems with dumb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) * DOS packet driver after a warm reboot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) lp->a->write_bcr(ioaddr, 20, 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) spin_unlock_irqrestore(&lp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) free_irq(dev->irq, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) spin_lock_irqsave(&lp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) pcnet32_purge_rx_ring(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) pcnet32_purge_tx_ring(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) spin_unlock_irqrestore(&lp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) static struct net_device_stats *pcnet32_get_stats(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) struct pcnet32_private *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) unsigned long ioaddr = dev->base_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) spin_lock_irqsave(&lp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) spin_unlock_irqrestore(&lp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) return &dev->stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) /* taken from the sunlance driver, which it took from the depca driver */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) static void pcnet32_load_multicast(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) struct pcnet32_private *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) volatile struct pcnet32_init_block *ib = lp->init_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) volatile __le16 *mcast_table = (__le16 *)ib->filter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) struct netdev_hw_addr *ha;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) unsigned long ioaddr = dev->base_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) u32 crc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) /* set all multicast bits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) if (dev->flags & IFF_ALLMULTI) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) ib->filter[0] = cpu_to_le32(~0U);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) ib->filter[1] = cpu_to_le32(~0U);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) lp->a->write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) /* clear the multicast filter */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) ib->filter[0] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) ib->filter[1] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) /* Add addresses */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) netdev_for_each_mc_addr(ha, dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) crc = ether_crc_le(6, ha->addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) crc = crc >> 26;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) mcast_table[crc >> 4] |= cpu_to_le16(1 << (crc & 0xf));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) for (i = 0; i < 4; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) lp->a->write_csr(ioaddr, PCNET32_MC_FILTER + i,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) le16_to_cpu(mcast_table[i]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) * Set or clear the multicast filter for this adaptor.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) static void pcnet32_set_multicast_list(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) unsigned long ioaddr = dev->base_addr, flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) struct pcnet32_private *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) int csr15, suspended;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) spin_lock_irqsave(&lp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) suspended = pcnet32_suspend(dev, &flags, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) csr15 = lp->a->read_csr(ioaddr, CSR15);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) if (dev->flags & IFF_PROMISC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) /* Log any net taps. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) netif_info(lp, hw, dev, "Promiscuous mode enabled\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) lp->init_block->mode =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) cpu_to_le16(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) 7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) lp->a->write_csr(ioaddr, CSR15, csr15 | 0x8000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) lp->init_block->mode =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) lp->a->write_csr(ioaddr, CSR15, csr15 & 0x7fff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) pcnet32_load_multicast(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) if (suspended) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) pcnet32_clr_suspend(lp, ioaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) pcnet32_restart(dev, CSR0_NORMAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) netif_wake_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) spin_unlock_irqrestore(&lp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) /* This routine assumes that the lp->lock is held */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) static int mdio_read(struct net_device *dev, int phy_id, int reg_num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) struct pcnet32_private *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) unsigned long ioaddr = dev->base_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) u16 val_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) if (!lp->mii)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) val_out = lp->a->read_bcr(ioaddr, 34);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) return val_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) /* This routine assumes that the lp->lock is held */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) static void mdio_write(struct net_device *dev, int phy_id, int reg_num, int val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) struct pcnet32_private *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) unsigned long ioaddr = dev->base_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) if (!lp->mii)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) lp->a->write_bcr(ioaddr, 34, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) static int pcnet32_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) struct pcnet32_private *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) /* SIOC[GS]MIIxxx ioctls */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) if (lp->mii) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) spin_lock_irqsave(&lp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) rc = generic_mii_ioctl(&lp->mii_if, if_mii(rq), cmd, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) spin_unlock_irqrestore(&lp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) rc = -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) static int pcnet32_check_otherphy(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) struct pcnet32_private *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) struct mii_if_info mii = lp->mii_if;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) u16 bmcr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) for (i = 0; i < PCNET32_MAX_PHYS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) if (i == lp->mii_if.phy_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) continue; /* skip active phy */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) if (lp->phymask & (1 << i)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) mii.phy_id = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) if (mii_link_ok(&mii)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) /* found PHY with active link */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) netif_info(lp, link, dev, "Using PHY number %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) /* isolate inactive phy */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) bmcr =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) mdio_read(dev, lp->mii_if.phy_id, MII_BMCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) mdio_write(dev, lp->mii_if.phy_id, MII_BMCR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) bmcr | BMCR_ISOLATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) /* de-isolate new phy */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) bmcr = mdio_read(dev, i, MII_BMCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) mdio_write(dev, i, MII_BMCR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) bmcr & ~BMCR_ISOLATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) /* set new phy address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) lp->mii_if.phy_id = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) * Show the status of the media. Similar to mii_check_media however it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) * correctly shows the link speed for all (tested) pcnet32 variants.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) * Devices with no mii just report link state without speed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) * Caller is assumed to hold and release the lp->lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) static void pcnet32_check_media(struct net_device *dev, int verbose)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) struct pcnet32_private *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) int curr_link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) int prev_link = netif_carrier_ok(dev) ? 1 : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) u32 bcr9;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) if (lp->mii) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) curr_link = mii_link_ok(&lp->mii_if);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) } else if (lp->chip_version == PCNET32_79C970A) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) ulong ioaddr = dev->base_addr; /* card base I/O address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) /* only read link if port is set to TP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) if (!lp->autoneg && lp->port_tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) curr_link = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) else /* link always up for AUI port or port auto select */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) curr_link = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) ulong ioaddr = dev->base_addr; /* card base I/O address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) curr_link = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) if (!curr_link) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) if (prev_link || verbose) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) netif_carrier_off(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) netif_info(lp, link, dev, "link down\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) if (lp->phycount > 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) curr_link = pcnet32_check_otherphy(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) prev_link = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) } else if (verbose || !prev_link) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) netif_carrier_on(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) if (lp->mii) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) if (netif_msg_link(lp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) struct ethtool_cmd ecmd = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) .cmd = ETHTOOL_GSET };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) mii_ethtool_gset(&lp->mii_if, &ecmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) netdev_info(dev, "link up, %uMbps, %s-duplex\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) ethtool_cmd_speed(&ecmd),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) (ecmd.duplex == DUPLEX_FULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) ? "full" : "half");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) bcr9 = lp->a->read_bcr(dev->base_addr, 9);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) if ((bcr9 & (1 << 0)) != lp->mii_if.full_duplex) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) if (lp->mii_if.full_duplex)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) bcr9 |= (1 << 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) bcr9 &= ~(1 << 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) lp->a->write_bcr(dev->base_addr, 9, bcr9);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) netif_info(lp, link, dev, "link up\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) * Check for loss of link and link establishment.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) * Could possibly be changed to use mii_check_media instead.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) static void pcnet32_watchdog(struct timer_list *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) struct pcnet32_private *lp = from_timer(lp, t, watchdog_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) struct net_device *dev = lp->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) /* Print the link status if it has changed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) spin_lock_irqsave(&lp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) pcnet32_check_media(dev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) spin_unlock_irqrestore(&lp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) mod_timer(&lp->watchdog_timer, round_jiffies(PCNET32_WATCHDOG_TIMEOUT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) static int __maybe_unused pcnet32_pm_suspend(struct device *device_d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) struct net_device *dev = dev_get_drvdata(device_d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) if (netif_running(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) netif_device_detach(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) pcnet32_close(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) static int __maybe_unused pcnet32_pm_resume(struct device *device_d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) struct net_device *dev = dev_get_drvdata(device_d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) if (netif_running(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) pcnet32_open(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922) netif_device_attach(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928) static void pcnet32_remove_one(struct pci_dev *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) struct net_device *dev = pci_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) if (dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) struct pcnet32_private *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) unregister_netdev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) pcnet32_free_ring(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) release_region(dev->base_addr, PCNET32_TOTAL_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) dma_free_coherent(&lp->pci_dev->dev, sizeof(*lp->init_block),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) lp->init_block, lp->init_dma_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) free_netdev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) pci_disable_device(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) static SIMPLE_DEV_PM_OPS(pcnet32_pm_ops, pcnet32_pm_suspend, pcnet32_pm_resume);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947) static struct pci_driver pcnet32_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) .name = DRV_NAME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) .probe = pcnet32_probe_pci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950) .remove = pcnet32_remove_one,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) .id_table = pcnet32_pci_tbl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) .driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953) .pm = &pcnet32_pm_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) /* An additional parameter that may be passed in... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958) static int debug = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) static int tx_start_pt = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) static int pcnet32_have_pci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962) module_param(debug, int, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) MODULE_PARM_DESC(debug, DRV_NAME " debug level");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964) module_param(max_interrupt_work, int, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) MODULE_PARM_DESC(max_interrupt_work,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) DRV_NAME " maximum events handled per interrupt");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967) module_param(rx_copybreak, int, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) MODULE_PARM_DESC(rx_copybreak,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) DRV_NAME " copy breakpoint for copy-only-tiny-frames");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) module_param(tx_start_pt, int, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971) MODULE_PARM_DESC(tx_start_pt, DRV_NAME " transmit start point (0-3)");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) module_param(pcnet32vlb, int, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) MODULE_PARM_DESC(pcnet32vlb, DRV_NAME " Vesa local bus (VLB) support (0/1)");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) module_param_array(options, int, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975) MODULE_PARM_DESC(options, DRV_NAME " initial option setting(s) (0-15)");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) module_param_array(full_duplex, int, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) MODULE_PARM_DESC(full_duplex, DRV_NAME " full duplex setting(s) (1)");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) /* Module Parameter for HomePNA cards added by Patrick Simmons, 2004 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979) module_param_array(homepna, int, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980) MODULE_PARM_DESC(homepna,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981) DRV_NAME
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982) " mode for 79C978 cards (1 for HomePNA, 0 for Ethernet, default Ethernet");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) MODULE_AUTHOR("Thomas Bogendoerfer");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) MODULE_DESCRIPTION("Driver for PCnet32 and PCnetPCI based ethercards");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) MODULE_LICENSE("GPL");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988) #define PCNET32_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990) static int __init pcnet32_init_module(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992) pcnet32_debug = netif_msg_init(debug, PCNET32_MSG_DEFAULT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994) if ((tx_start_pt >= 0) && (tx_start_pt <= 3))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) tx_start = tx_start_pt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997) /* find the PCI devices */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998) if (!pci_register_driver(&pcnet32_driver))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999) pcnet32_have_pci = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001) /* should we find any remaining VLbus devices ? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002) if (pcnet32vlb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003) pcnet32_probe_vlbus(pcnet32_portlist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005) if (cards_found && (pcnet32_debug & NETIF_MSG_PROBE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006) pr_info("%d cards_found\n", cards_found);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008) return (pcnet32_have_pci + cards_found) ? 0 : -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011) static void __exit pcnet32_cleanup_module(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013) struct net_device *next_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015) while (pcnet32_dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016) struct pcnet32_private *lp = netdev_priv(pcnet32_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017) next_dev = lp->next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) unregister_netdev(pcnet32_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019) pcnet32_free_ring(pcnet32_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020) release_region(pcnet32_dev->base_addr, PCNET32_TOTAL_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021) dma_free_coherent(&lp->pci_dev->dev, sizeof(*lp->init_block),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022) lp->init_block, lp->init_dma_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023) free_netdev(pcnet32_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024) pcnet32_dev = next_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027) if (pcnet32_have_pci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028) pci_unregister_driver(&pcnet32_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031) module_init(pcnet32_init_module);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032) module_exit(pcnet32_cleanup_module);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035) * Local variables:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036) * c-indent-level: 4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037) * tab-width: 8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038) * End:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039) */