/* sis900.c: A SiS 900/7016 PCI Fast Ethernet driver for Linux.
   Copyright 1999 Silicon Integrated System Corporation
   Revision:	1.08.10 Apr. 2 2006

   Modified from the driver which was originally written by Donald Becker.

   This software may be used and distributed according to the terms
   of the GNU General Public License (GPL), incorporated herein by reference.
   Drivers based on this skeleton fall under the GPL and must retain
   the authorship (implicit copyright) notice.

   References:
   SiS 7016 Fast Ethernet PCI Bus 10/100 Mbps LAN Controller with OnNow Support,
   preliminary Rev. 1.0 Jan. 14, 1998
   SiS 900 Fast Ethernet PCI Bus 10/100 Mbps LAN Single Chip with OnNow Support,
   preliminary Rev. 1.0 Nov. 10, 1998
   SiS 7014 Single Chip 100BASE-TX/10BASE-T Physical Layer Solution,
   preliminary Rev. 1.0 Jan. 18, 1998

   Rev 1.08.10 Apr. 2 2006 Daniele Venzano add vlan (jumbo packets) support
   Rev 1.08.09 Sep. 19 2005 Daniele Venzano add Wake on LAN support
   Rev 1.08.08 Jan. 22 2005 Daniele Venzano use netif_msg for debugging messages
   Rev 1.08.07 Nov. 2 2003 Daniele Venzano <venza@brownhat.org> add suspend/resume support
   Rev 1.08.06 Sep. 24 2002 Mufasa Yang bug fix for Tx timeout & add SiS963 support
   Rev 1.08.05 Jun. 6 2002 Mufasa Yang bug fix for read_eeprom & Tx descriptor over-boundary
   Rev 1.08.04 Apr. 25 2002 Mufasa Yang <mufasa@sis.com.tw> added SiS962 support
   Rev 1.08.03 Feb. 1 2002 Matt Domsch <Matt_Domsch@dell.com> update to use library crc32 function
   Rev 1.08.02 Nov. 30 2001 Hui-Fen Hsu workaround for EDB & bug fix for dhcp problem
   Rev 1.08.01 Aug. 25 2001 Hui-Fen Hsu update for 630ET & workaround for ICS1893 PHY
   Rev 1.08.00 Jun. 11 2001 Hui-Fen Hsu workaround for RTL8201 PHY and some bug fix
   Rev 1.07.11 Apr. 2 2001 Hui-Fen Hsu updates PCI drivers to use the new pci_set_dma_mask for kernel 2.4.3
   Rev 1.07.10 Mar. 1 2001 Hui-Fen Hsu <hfhsu@sis.com.tw> some bug fix & 635M/B support
   Rev 1.07.09 Feb. 9 2001 Dave Jones <davej@suse.de> PCI enable cleanup
   Rev 1.07.08 Jan. 8 2001 Lei-Chun Chang added RTL8201 PHY support
   Rev 1.07.07 Nov. 29 2000 Lei-Chun Chang added kernel-doc extractable documentation and 630 workaround fix
   Rev 1.07.06 Nov. 7 2000 Jeff Garzik <jgarzik@pobox.com> some bug fix and cleaning
   Rev 1.07.05 Nov. 6 2000 metapirat <metapirat@gmx.de> contribute media type select by ifconfig
   Rev 1.07.04 Sep. 6 2000 Lei-Chun Chang added ICS1893 PHY support
   Rev 1.07.03 Aug. 24 2000 Lei-Chun Chang (lcchang@sis.com.tw) modified 630E equalizer workaround rule
   Rev 1.07.01 Aug. 08 2000 Ollie Lho minor update for SiS 630E and SiS 630E A1
   Rev 1.07    Mar. 07 2000 Ollie Lho bug fix in Rx buffer ring
   Rev 1.06.04 Feb. 11 2000 Jeff Garzik <jgarzik@pobox.com> softnet and init for kernel 2.4
   Rev 1.06.03 Dec. 23 1999 Ollie Lho Third release
   Rev 1.06.02 Nov. 23 1999 Ollie Lho bug in mac probing fixed
   Rev 1.06.01 Nov. 16 1999 Ollie Lho CRC calculation provided by Joseph Zbiciak (im14u2c@primenet.com)
   Rev 1.06    Nov. 4 1999 Ollie Lho (ollie@sis.com.tw) Second release
   Rev 1.05.05 Oct. 29 1999 Ollie Lho (ollie@sis.com.tw) Single buffer Tx/Rx
			    Chin-Shan Li (lcs@sis.com.tw) Added AMD Am79c901 HomePNA PHY support
   Rev 1.05    Aug. 7 1999 Jim Huang (cmhuang@sis.com.tw) Initial release
*/

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/mii.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/bitops.h>
#include <linux/dma-mapping.h>

#include <asm/processor.h>	/* Processor type for cache alignment. */
#include <asm/io.h>
#include <asm/irq.h>
#include <linux/uaccess.h>	/* User space memory access functions */

#include "sis900.h"

#define SIS900_MODULE_NAME "sis900"
#define SIS900_DRV_VERSION "v1.08.10 Apr. 2 2006"

static const char version[] =
	KERN_INFO "sis900.c: " SIS900_DRV_VERSION "\n";

static int max_interrupt_work = 40;
static int multicast_filter_limit = 128;

static int sis900_debug = -1; /* Use SIS900_DEF_MSG as value */

#define SIS900_DEF_MSG \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (4*HZ)

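/* TX_TIMEOUT is installed as net_dev->watchdog_timeo in sis900_probe() below. */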
enum {
	SIS_900 = 0,
	SIS_7016
};
static const char * card_names[] = {
	"SiS 900 PCI Fast Ethernet",
	"SiS 7016 PCI Fast Ethernet"
};

static const struct pci_device_id sis900_pci_tbl[] = {
	{PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_900,
	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, SIS_900},
	{PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_7016,
	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, SIS_7016},
	{0,}
};
MODULE_DEVICE_TABLE (pci, sis900_pci_tbl);

static void sis900_read_mode(struct net_device *net_dev, int *speed, int *duplex);

static const struct mii_chip_info {
	const char * name;
	u16 phy_id0;
	u16 phy_id1;
	u8  phy_types;
#define HOME	0x0001
#define LAN	0x0002
#define MIX	0x0003
#define UNKNOWN	0x0
} mii_chip_table[] = {
	{ "SiS 900 Internal MII PHY",		0x001d, 0x8000, LAN },
	{ "SiS 7014 Physical Layer Solution",	0x0016, 0xf830, LAN },
	{ "SiS 900 on Foxconn 661 7MI",		0x0143, 0xBC70, LAN },
	{ "Altimata AC101LF PHY",		0x0022, 0x5520, LAN },
	{ "ADM 7001 LAN PHY",			0x002e, 0xcc60, LAN },
	{ "AMD 79C901 10BASE-T PHY",		0x0000, 0x6B70, LAN },
	{ "AMD 79C901 HomePNA PHY",		0x0000, 0x6B90, HOME},
	{ "ICS LAN PHY",			0x0015, 0xF440, LAN },
	{ "ICS LAN PHY",			0x0143, 0xBC70, LAN },
	{ "NS 83851 PHY",			0x2000, 0x5C20, MIX },
	{ "NS 83847 PHY",			0x2000, 0x5C30, MIX },
	{ "Realtek RTL8201 PHY",		0x0000, 0x8200, LAN },
	{ "VIA 6103 PHY",			0x0101, 0x8f20, LAN },
	{NULL,},
};

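/*
 * Note on matching (see sis900_mii_probe() below): a table entry matches
 * when phy_id0 is equal and phy_id1 agrees after masking with 0xFFF0,
 * i.e. the low revision nibble of PHY ID1 is ignored.  PHYs that match no
 * entry are still recorded, with their type set to UNKNOWN.
 */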
struct mii_phy {
	struct mii_phy * next;
	int phy_addr;
	u16 phy_id0;
	u16 phy_id1;
	u16 status;
	u8  phy_types;
};

typedef struct _BufferDesc {
	u32 link;
	u32 cmdsts;
	u32 bufptr;
} BufferDesc;

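/*
 * BufferDesc mirrors the hardware DMA descriptor used for both the Rx and
 * Tx rings: "link" holds the bus address of the next descriptor, "cmdsts"
 * the command/status word, and "bufptr" the bus address of the packet
 * buffer.  The rings are allocated from coherent DMA memory below, so the
 * NIC sees these fields directly.
 */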
struct sis900_private {
	struct pci_dev * pci_dev;

	spinlock_t lock;

	struct mii_phy * mii;
	struct mii_phy * first_mii; /* record the first mii structure */
	unsigned int cur_phy;
	struct mii_if_info mii_info;

	void __iomem	*ioaddr;

	struct timer_list timer; /* Link status detection timer. */
	u8 autong_complete; /* 1: auto-negotiate complete */

	u32 msg_enable;

	unsigned int cur_rx, dirty_rx; /* producer/consumer pointers for Tx/Rx ring */
	unsigned int cur_tx, dirty_tx;

	/* The saved address of a sent/receive-in-place packet buffer */
	struct sk_buff *tx_skbuff[NUM_TX_DESC];
	struct sk_buff *rx_skbuff[NUM_RX_DESC];
	BufferDesc *tx_ring;
	BufferDesc *rx_ring;

	dma_addr_t tx_ring_dma;
	dma_addr_t rx_ring_dma;

	unsigned int tx_full; /* The Tx queue is full. */
	u8 host_bridge_rev;
	u8 chipset_rev;
	/* EEPROM data */
	int eeprom_size;
};

MODULE_AUTHOR("Jim Huang <cmhuang@sis.com.tw>, Ollie Lho <ollie@sis.com.tw>");
MODULE_DESCRIPTION("SiS 900 PCI Fast Ethernet driver");
MODULE_LICENSE("GPL");

module_param(multicast_filter_limit, int, 0444);
module_param(max_interrupt_work, int, 0444);
module_param(sis900_debug, int, 0444);
MODULE_PARM_DESC(multicast_filter_limit, "SiS 900/7016 maximum number of filtered multicast addresses");
MODULE_PARM_DESC(max_interrupt_work, "SiS 900/7016 maximum events handled per interrupt");
MODULE_PARM_DESC(sis900_debug, "SiS 900/7016 bitmapped debugging message level");

#define sw32(reg, val)	iowrite32(val, ioaddr + (reg))
#define sw8(reg, val)	iowrite8(val, ioaddr + (reg))
#define sr32(reg)	ioread32(ioaddr + (reg))
#define sr16(reg)	ioread16(ioaddr + (reg))

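/*
 * Example module load (illustrative values only):
 *
 *   modprobe sis900 sis900_debug=0x37 max_interrupt_work=40 \
 *                   multicast_filter_limit=128
 *
 * sis900_debug is a netif_msg_* bitmask; leaving it at the default of -1
 * selects SIS900_DEF_MSG above.
 */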
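/*
 * These register accessors expand to MMIO/PIO helpers on a local variable
 * named "ioaddr"; the functions that use them first load
 * sis_priv->ioaddr into such a variable.
 */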
#ifdef CONFIG_NET_POLL_CONTROLLER
static void sis900_poll(struct net_device *dev);
#endif
static int sis900_open(struct net_device *net_dev);
static int sis900_mii_probe (struct net_device * net_dev);
static void sis900_init_rxfilter (struct net_device * net_dev);
static u16 read_eeprom(void __iomem *ioaddr, int location);
static int mdio_read(struct net_device *net_dev, int phy_id, int location);
static void mdio_write(struct net_device *net_dev, int phy_id, int location, int val);
static void sis900_timer(struct timer_list *t);
static void sis900_check_mode (struct net_device *net_dev, struct mii_phy *mii_phy);
static void sis900_tx_timeout(struct net_device *net_dev, unsigned int txqueue);
static void sis900_init_tx_ring(struct net_device *net_dev);
static void sis900_init_rx_ring(struct net_device *net_dev);
static netdev_tx_t sis900_start_xmit(struct sk_buff *skb,
				     struct net_device *net_dev);
static int sis900_rx(struct net_device *net_dev);
static void sis900_finish_xmit (struct net_device *net_dev);
static irqreturn_t sis900_interrupt(int irq, void *dev_instance);
static int sis900_close(struct net_device *net_dev);
static int mii_ioctl(struct net_device *net_dev, struct ifreq *rq, int cmd);
static u16 sis900_mcast_bitnr(u8 *addr, u8 revision);
static void set_rx_mode(struct net_device *net_dev);
static void sis900_reset(struct net_device *net_dev);
static void sis630_set_eq(struct net_device *net_dev, u8 revision);
static int sis900_set_config(struct net_device *dev, struct ifmap *map);
static u16 sis900_default_phy(struct net_device * net_dev);
static void sis900_set_capability(struct net_device *net_dev, struct mii_phy *phy);
static u16 sis900_reset_phy(struct net_device *net_dev, int phy_addr);
static void sis900_auto_negotiate(struct net_device *net_dev, int phy_addr);
static void sis900_set_mode(struct sis900_private *, int speed, int duplex);
static const struct ethtool_ops sis900_ethtool_ops;

/**
 * sis900_get_mac_addr - Get MAC address for standalone SiS900 model
 * @pci_dev: the sis900 pci device
 * @net_dev: the net device to get address for
 *
 * Older SiS900 chips and their relatives store the MAC address in an
 * EEPROM.  The MAC address is read via read_eeprom() into
 * @net_dev->dev_addr.
 */

static int sis900_get_mac_addr(struct pci_dev *pci_dev,
			       struct net_device *net_dev)
{
	struct sis900_private *sis_priv = netdev_priv(net_dev);
	void __iomem *ioaddr = sis_priv->ioaddr;
	u16 signature;
	int i;

	/* check to see if we have sane EEPROM */
	signature = (u16) read_eeprom(ioaddr, EEPROMSignature);
	if (signature == 0xffff || signature == 0x0000) {
		printk(KERN_WARNING "%s: Error EEPROM read %x\n",
		       pci_name(pci_dev), signature);
		return 0;
	}

	/* get MAC address from EEPROM */
	for (i = 0; i < 3; i++)
		((u16 *)(net_dev->dev_addr))[i] = read_eeprom(ioaddr, i+EEPROMMACAddr);

	return 1;
}

/**
 * sis630e_get_mac_addr - Get MAC address for SiS630E model
 * @pci_dev: the sis900 pci device
 * @net_dev: the net device to get address for
 *
 * The SiS630E model stores the MAC address in the APC CMOS RAM, which is
 * accessed through the ISA bridge.  The MAC address is read into
 * @net_dev->dev_addr.
 */

static int sis630e_get_mac_addr(struct pci_dev *pci_dev,
				struct net_device *net_dev)
{
	struct pci_dev *isa_bridge = NULL;
	u8 reg;
	int i;

	isa_bridge = pci_get_device(PCI_VENDOR_ID_SI, 0x0008, isa_bridge);
	if (!isa_bridge)
		isa_bridge = pci_get_device(PCI_VENDOR_ID_SI, 0x0018, isa_bridge);
	if (!isa_bridge) {
		printk(KERN_WARNING "%s: Can not find ISA bridge\n",
		       pci_name(pci_dev));
		return 0;
	}
	pci_read_config_byte(isa_bridge, 0x48, &reg);
	pci_write_config_byte(isa_bridge, 0x48, reg | 0x40);

	for (i = 0; i < 6; i++) {
		outb(0x09 + i, 0x70);
		((u8 *)(net_dev->dev_addr))[i] = inb(0x71);
	}

	pci_write_config_byte(isa_bridge, 0x48, reg & ~0x40);
	pci_dev_put(isa_bridge);

	return 1;
}


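	/*
	 * The MAC address lives in the APC CMOS RAM at offsets 0x09-0x0E.
	 * With APC access enabled above (bit 0x40 in ISA bridge register
	 * 0x48), read it out one byte at a time through the CMOS
	 * index/data ports 0x70/0x71.
	 */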
/**
 * sis635_get_mac_addr - Get MAC address for SIS635 model
 * @pci_dev: the sis900 pci device
 * @net_dev: the net device to get address for
 *
 * On the SiS635 model, setting the MAC Reload Bit loads the MAC address
 * from the APC into rfdr.  rfdr is accessed through rfcr.  The MAC
 * address is read into @net_dev->dev_addr.
 */

static int sis635_get_mac_addr(struct pci_dev *pci_dev,
			       struct net_device *net_dev)
{
	struct sis900_private *sis_priv = netdev_priv(net_dev);
	void __iomem *ioaddr = sis_priv->ioaddr;
	u32 rfcrSave;
	u32 i;

	rfcrSave = sr32(rfcr);

	sw32(cr, rfcrSave | RELOAD);
	sw32(cr, 0);

	/* disable packet filtering before setting filter */
	sw32(rfcr, rfcrSave & ~RFEN);

	/* load MAC addr to filter data register */
	for (i = 0 ; i < 3 ; i++) {
		sw32(rfcr, (i << RFADDR_shift));
		*( ((u16 *)net_dev->dev_addr) + i) = sr16(rfdr);
	}

	/* enable packet filtering */
	sw32(rfcr, rfcrSave | RFEN);

	return 1;
}

/**
 * sis96x_get_mac_addr - Get MAC address for SiS962 or SiS963 model
 * @pci_dev: the sis900 pci device
 * @net_dev: the net device to get address for
 *
 * The SiS962 and SiS963 models store the MAC address in an EEPROM that is
 * shared between the LAN and 1394 functions.  To access the EEPROM, first
 * send the EEREQ signal to the hardware and wait for EEGNT.  While EEGNT
 * is set, the LAN function is permitted to access the EEPROM; once the
 * MAC address has been read, send EEDONE to give up the EEPROM again.
 * The EEPROM map of the SiS962/SiS963 differs from that of the SiS900,
 * and the signature field in the SiS962/SiS963 spec is meaningless.
 * The MAC address is read into @net_dev->dev_addr.
 */

static int sis96x_get_mac_addr(struct pci_dev *pci_dev,
			       struct net_device *net_dev)
{
	struct sis900_private *sis_priv = netdev_priv(net_dev);
	void __iomem *ioaddr = sis_priv->ioaddr;
	int wait, rc = 0;

	sw32(mear, EEREQ);
	for (wait = 0; wait < 2000; wait++) {
		if (sr32(mear) & EEGNT) {
			u16 *mac = (u16 *)net_dev->dev_addr;
			int i;

			/* get MAC address from EEPROM */
			for (i = 0; i < 3; i++)
				mac[i] = read_eeprom(ioaddr, i + EEPROMMACAddr);

			rc = 1;
			break;
		}
		udelay(1);
	}
	sw32(mear, EEDONE);
	return rc;
}

static const struct net_device_ops sis900_netdev_ops = {
	.ndo_open		= sis900_open,
	.ndo_stop		= sis900_close,
	.ndo_start_xmit		= sis900_start_xmit,
	.ndo_set_config		= sis900_set_config,
	.ndo_set_rx_mode	= set_rx_mode,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_do_ioctl		= mii_ioctl,
	.ndo_tx_timeout		= sis900_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= sis900_poll,
#endif
};

/**
 * sis900_probe - Probe for sis900 device
 * @pci_dev: the sis900 pci device
 * @pci_id: the pci device ID
 *
 * Check and probe the sis900 net device for @pci_dev.
 * Get the MAC address according to the chip revision, and assign the
 * SiS900-specific entries in the device structure, i.e. sis900_open(),
 * sis900_start_xmit(), sis900_close(), etc.
 */

static int sis900_probe(struct pci_dev *pci_dev,
			const struct pci_device_id *pci_id)
{
	struct sis900_private *sis_priv;
	struct net_device *net_dev;
	struct pci_dev *dev;
	dma_addr_t ring_dma;
	void *ring_space;
	void __iomem *ioaddr;
	int i, ret;
	const char *card_name = card_names[pci_id->driver_data];
	const char *dev_name = pci_name(pci_dev);

/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
	static int printed_version;
	if (!printed_version++)
		printk(version);
#endif

	/* setup various bits in PCI command register */
	ret = pcim_enable_device(pci_dev);
	if (ret)
		return ret;

	i = dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(32));
	if (i) {
		printk(KERN_ERR "sis900.c: architecture does not support "
		       "32bit PCI busmaster DMA\n");
		return i;
	}

	pci_set_master(pci_dev);

	net_dev = alloc_etherdev(sizeof(struct sis900_private));
	if (!net_dev)
		return -ENOMEM;
	SET_NETDEV_DEV(net_dev, &pci_dev->dev);

	/* We do a request_region() to register /proc/ioports info. */
	ret = pci_request_regions(pci_dev, "sis900");
	if (ret)
		goto err_out;

	/* IO region. */
	ioaddr = pci_iomap(pci_dev, 0, 0);
	if (!ioaddr) {
		ret = -ENOMEM;
		goto err_out;
	}

	sis_priv = netdev_priv(net_dev);
	sis_priv->ioaddr = ioaddr;
	sis_priv->pci_dev = pci_dev;
	spin_lock_init(&sis_priv->lock);

	sis_priv->eeprom_size = 24;

	pci_set_drvdata(pci_dev, net_dev);

	ring_space = dma_alloc_coherent(&pci_dev->dev, TX_TOTAL_SIZE,
					&ring_dma, GFP_KERNEL);
	if (!ring_space) {
		ret = -ENOMEM;
		goto err_out_unmap;
	}
	sis_priv->tx_ring = ring_space;
	sis_priv->tx_ring_dma = ring_dma;

	ring_space = dma_alloc_coherent(&pci_dev->dev, RX_TOTAL_SIZE,
					&ring_dma, GFP_KERNEL);
	if (!ring_space) {
		ret = -ENOMEM;
		goto err_unmap_tx;
	}
	sis_priv->rx_ring = ring_space;
	sis_priv->rx_ring_dma = ring_dma;

	/* The SiS900-specific entries in the device structure. */
	net_dev->netdev_ops = &sis900_netdev_ops;
	net_dev->watchdog_timeo = TX_TIMEOUT;
	net_dev->ethtool_ops = &sis900_ethtool_ops;

	if (sis900_debug > 0)
		sis_priv->msg_enable = sis900_debug;
	else
		sis_priv->msg_enable = SIS900_DEF_MSG;

	sis_priv->mii_info.dev = net_dev;
	sis_priv->mii_info.mdio_read = mdio_read;
	sis_priv->mii_info.mdio_write = mdio_write;
	sis_priv->mii_info.phy_id_mask = 0x1f;
	sis_priv->mii_info.reg_num_mask = 0x1f;

	/* Get MAC address according to the chip revision */
	sis_priv->chipset_rev = pci_dev->revision;
	if (netif_msg_probe(sis_priv))
		printk(KERN_DEBUG "%s: detected revision %2.2x, "
		       "trying to get MAC address...\n",
		       dev_name, sis_priv->chipset_rev);

	ret = 0;
	if (sis_priv->chipset_rev == SIS630E_900_REV)
		ret = sis630e_get_mac_addr(pci_dev, net_dev);
	else if ((sis_priv->chipset_rev > 0x81) && (sis_priv->chipset_rev <= 0x90))
		ret = sis635_get_mac_addr(pci_dev, net_dev);
	else if (sis_priv->chipset_rev == SIS96x_900_REV)
		ret = sis96x_get_mac_addr(pci_dev, net_dev);
	else
		ret = sis900_get_mac_addr(pci_dev, net_dev);

	if (!ret || !is_valid_ether_addr(net_dev->dev_addr)) {
		eth_hw_addr_random(net_dev);
		printk(KERN_WARNING "%s: Unreadable or invalid MAC address, "
		       "using a randomly generated one\n", dev_name);
	}

	/* 630ET : set the mii access mode as software-mode */
	if (sis_priv->chipset_rev == SIS630ET_900_REV)
		sw32(cr, ACCESSMODE | sr32(cr));

	/* probe for mii transceiver */
	if (sis900_mii_probe(net_dev) == 0) {
		printk(KERN_WARNING "%s: Error probing MII device.\n",
		       dev_name);
		ret = -ENODEV;
		goto err_unmap_rx;
	}

	/* save our host bridge revision */
	dev = pci_get_device(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_630, NULL);
	if (dev) {
		sis_priv->host_bridge_rev = dev->revision;
		pci_dev_put(dev);
	}

	ret = register_netdev(net_dev);
	if (ret)
		goto err_unmap_rx;

	/* print some information about our NIC */
	printk(KERN_INFO "%s: %s at 0x%p, IRQ %d, %pM\n",
	       net_dev->name, card_name, ioaddr, pci_dev->irq,
	       net_dev->dev_addr);

	/* Detect Wake on LAN support */
	ret = (sr32(CFGPMC) & PMESP) >> 27;
	if (netif_msg_probe(sis_priv) && (ret & PME_D3C) == 0)
		printk(KERN_INFO "%s: Wake on LAN only available from suspend to RAM.\n",
		       net_dev->name);

	return 0;

err_unmap_rx:
	dma_free_coherent(&pci_dev->dev, RX_TOTAL_SIZE, sis_priv->rx_ring,
			  sis_priv->rx_ring_dma);
err_unmap_tx:
	dma_free_coherent(&pci_dev->dev, TX_TOTAL_SIZE, sis_priv->tx_ring,
			  sis_priv->tx_ring_dma);
err_out_unmap:
	pci_iounmap(pci_dev, ioaddr);
err_out:
	free_netdev(net_dev);
	return ret;
}

/**
 * sis900_mii_probe - Probe MII PHY for sis900
 * @net_dev: the net device to probe for
 *
 * Search all 32 possible MII PHY addresses.  Identify and set the
 * current PHY if one is found; return 0 if no usable PHY was found.
 */

static int sis900_mii_probe(struct net_device *net_dev)
{
	struct sis900_private *sis_priv = netdev_priv(net_dev);
	const char *dev_name = pci_name(sis_priv->pci_dev);
	u16 poll_bit = MII_STAT_LINK, status = 0;
	unsigned long timeout = jiffies + 5 * HZ;
	int phy_addr;

	sis_priv->mii = NULL;

	/* search for total of 32 possible mii phy addresses */
	for (phy_addr = 0; phy_addr < 32; phy_addr++) {
		struct mii_phy * mii_phy = NULL;
		u16 mii_status;
		int i;

		mii_phy = NULL;
		for (i = 0; i < 2; i++)
			mii_status = mdio_read(net_dev, phy_addr, MII_STATUS);

		if (mii_status == 0xffff || mii_status == 0x0000) {
			if (netif_msg_probe(sis_priv))
				printk(KERN_DEBUG "%s: MII at address %d"
				       " not accessible\n",
				       dev_name, phy_addr);
			continue;
		}

		if ((mii_phy = kmalloc(sizeof(struct mii_phy), GFP_KERNEL)) == NULL) {
			mii_phy = sis_priv->first_mii;
			while (mii_phy) {
				struct mii_phy *phy;
				phy = mii_phy;
				mii_phy = mii_phy->next;
				kfree(phy);
			}
			return 0;
		}

		mii_phy->phy_id0 = mdio_read(net_dev, phy_addr, MII_PHY_ID0);
		mii_phy->phy_id1 = mdio_read(net_dev, phy_addr, MII_PHY_ID1);
		mii_phy->phy_addr = phy_addr;
		mii_phy->status = mii_status;
		mii_phy->next = sis_priv->mii;
		sis_priv->mii = mii_phy;
		sis_priv->first_mii = mii_phy;

		for (i = 0; mii_chip_table[i].phy_id1; i++)
			if ((mii_phy->phy_id0 == mii_chip_table[i].phy_id0) &&
			    ((mii_phy->phy_id1 & 0xFFF0) == mii_chip_table[i].phy_id1)) {
				mii_phy->phy_types = mii_chip_table[i].phy_types;
				if (mii_chip_table[i].phy_types == MIX)
					mii_phy->phy_types =
					    (mii_status & (MII_STAT_CAN_TX_FDX | MII_STAT_CAN_TX)) ? LAN : HOME;
				printk(KERN_INFO "%s: %s transceiver found "
				       "at address %d.\n",
				       dev_name,
				       mii_chip_table[i].name,
				       phy_addr);
				break;
			}

		if (!mii_chip_table[i].phy_id1) {
			printk(KERN_INFO "%s: Unknown PHY transceiver found at address %d.\n",
			       dev_name, phy_addr);
			mii_phy->phy_types = UNKNOWN;
		}
	}

	if (sis_priv->mii == NULL) {
		printk(KERN_INFO "%s: No MII transceivers found!\n", dev_name);
		return 0;
	}

	/* select default PHY for mac */
	sis_priv->mii = NULL;
	sis900_default_phy(net_dev);

	/* Reset phy if default phy is internal sis900 */
	if ((sis_priv->mii->phy_id0 == 0x001D) &&
	    ((sis_priv->mii->phy_id1 & 0xFFF0) == 0x8000))
		status = sis900_reset_phy(net_dev, sis_priv->cur_phy);

	/* workaround for ICS1893 PHY */
	if ((sis_priv->mii->phy_id0 == 0x0015) &&
	    ((sis_priv->mii->phy_id1 & 0xFFF0) == 0xF440))
		mdio_write(net_dev, sis_priv->cur_phy, 0x0018, 0xD200);

	if (status & MII_STAT_LINK) {
		while (poll_bit) {
			yield();

			poll_bit ^= (mdio_read(net_dev, sis_priv->cur_phy, MII_STATUS) & poll_bit);
			if (time_after_eq(jiffies, timeout)) {
				printk(KERN_WARNING "%s: reset phy and link down now\n",
				       dev_name);
				return -ETIME;
			}
		}
	}

	if (sis_priv->chipset_rev == SIS630E_900_REV) {
		/* SiS 630E has some bugs on default value of PHY registers */
		mdio_write(net_dev, sis_priv->cur_phy, MII_ANADV, 0x05e1);
		mdio_write(net_dev, sis_priv->cur_phy, MII_CONFIG1, 0x22);
		mdio_write(net_dev, sis_priv->cur_phy, MII_CONFIG2, 0xff00);
		mdio_write(net_dev, sis_priv->cur_phy, MII_MASK, 0xffc0);
		//mdio_write(net_dev, sis_priv->cur_phy, MII_CONTROL, 0x1000);
	}

	if (sis_priv->mii->status & MII_STAT_LINK)
		netif_carrier_on(net_dev);
	else
		netif_carrier_off(net_dev);

	return 1;
}

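	/*
	 * If the PHY reset above returned a status with the link bit set,
	 * wait (up to the 5 second timeout armed at the top of this
	 * function) for the link bit to reappear; poll_bit drops to zero
	 * once MII_STAT_LINK has been observed again.
	 */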
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) * sis900_default_phy - Select default PHY for sis900 mac.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) * @net_dev: the net device to probe for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) * Select first detected PHY with link as default.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) * If no one is link on, select PHY whose types is HOME as default.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) * If HOME doesn't exist, select LAN.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) static u16 sis900_default_phy(struct net_device * net_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) struct sis900_private *sis_priv = netdev_priv(net_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) struct mii_phy *phy = NULL, *phy_home = NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) *default_phy = NULL, *phy_lan = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) u16 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) for (phy=sis_priv->first_mii; phy; phy=phy->next) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) status = mdio_read(net_dev, phy->phy_addr, MII_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) status = mdio_read(net_dev, phy->phy_addr, MII_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) /* Link ON & Not select default PHY & not ghost PHY */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) if ((status & MII_STAT_LINK) && !default_phy &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) (phy->phy_types != UNKNOWN)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) default_phy = phy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) status = mdio_read(net_dev, phy->phy_addr, MII_CONTROL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) mdio_write(net_dev, phy->phy_addr, MII_CONTROL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) status | MII_CNTL_AUTO | MII_CNTL_ISOLATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) if (phy->phy_types == HOME)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) phy_home = phy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) else if (phy->phy_types == LAN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) phy_lan = phy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) if (!default_phy && phy_home)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) default_phy = phy_home;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) else if (!default_phy && phy_lan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) default_phy = phy_lan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) else if (!default_phy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) default_phy = sis_priv->first_mii;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) if (sis_priv->mii != default_phy) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) sis_priv->mii = default_phy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) sis_priv->cur_phy = default_phy->phy_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) printk(KERN_INFO "%s: Using transceiver found at address %d as default\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) pci_name(sis_priv->pci_dev), sis_priv->cur_phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) sis_priv->mii_info.phy_id = sis_priv->cur_phy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) status = mdio_read(net_dev, sis_priv->cur_phy, MII_CONTROL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) status &= (~MII_CNTL_ISOLATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) mdio_write(net_dev, sis_priv->cur_phy, MII_CONTROL, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) status = mdio_read(net_dev, sis_priv->cur_phy, MII_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) status = mdio_read(net_dev, sis_priv->cur_phy, MII_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) }
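
/*
 * Illustrative example (not part of the driver logic): suppose the probe
 * found a LAN PHY at address 1 and a HOME PHY at address 2, and neither
 * currently reports MII_STAT_LINK. Nothing is picked inside the loop
 * above, so the fallback order applies: phy_home is non-NULL and becomes
 * the default; only if no HOME PHY existed would the LAN PHY (or, failing
 * that, first_mii) be chosen.
 */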
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) * sis900_set_capability - set the media capability of network adapter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) * @net_dev: the net device to probe for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) * @phy: the default PHY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) * Set the media capability of the network adapter according to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) * MII status register. This is necessary before auto-negotiation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) static void sis900_set_capability(struct net_device *net_dev, struct mii_phy *phy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) u16 cap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) mdio_read(net_dev, phy->phy_addr, MII_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) mdio_read(net_dev, phy->phy_addr, MII_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) cap = MII_NWAY_CSMA_CD |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) ((phy->status & MII_STAT_CAN_TX_FDX)? MII_NWAY_TX_FDX:0) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) ((phy->status & MII_STAT_CAN_TX) ? MII_NWAY_TX:0) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) ((phy->status & MII_STAT_CAN_T_FDX) ? MII_NWAY_T_FDX:0)|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) ((phy->status & MII_STAT_CAN_T) ? MII_NWAY_T:0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) mdio_write(net_dev, phy->phy_addr, MII_ANADV, cap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) }
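
/*
 * Illustrative example: for a typical 10/100 PHY whose status word
 * reports all four abilities (MII_STAT_CAN_T, _T_FDX, _TX and _TX_FDX),
 * the advertisement written to MII_ANADV above works out to
 *
 *	cap = MII_NWAY_CSMA_CD | MII_NWAY_T | MII_NWAY_T_FDX |
 *	      MII_NWAY_TX | MII_NWAY_TX_FDX;
 *
 * i.e. 10 and 100 Mbps in both half and full duplex are offered to the
 * link partner during auto-negotiation.
 */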
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) /* Delay between EEPROM clock transitions. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) #define eeprom_delay() sr32(mear)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) * read_eeprom - Read Serial EEPROM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) * @ioaddr: base i/o address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) * @location: the EEPROM location to read
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) * Read Serial EEPROM through EEPROM Access Register.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) * Note that the location is in units of 16-bit words.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) static u16 read_eeprom(void __iomem *ioaddr, int location)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) u32 read_cmd = location | EEread;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) u16 retval = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) sw32(mear, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) eeprom_delay();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) sw32(mear, EECS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) eeprom_delay();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) /* Shift the read command (9 bits) out. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) for (i = 8; i >= 0; i--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) u32 dataval = (read_cmd & (1 << i)) ? EEDI | EECS : EECS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) sw32(mear, dataval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) eeprom_delay();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) sw32(mear, dataval | EECLK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) eeprom_delay();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) sw32(mear, EECS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) eeprom_delay();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) /* Read the 16 data bits in. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) for (i = 16; i > 0; i--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) sw32(mear, EECS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) eeprom_delay();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) sw32(mear, EECS | EECLK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) eeprom_delay();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) retval = (retval << 1) | ((sr32(mear) & EEDO) ? 1 : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) eeprom_delay();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) /* Terminate the EEPROM access. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) sw32(mear, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) eeprom_delay();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) }
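
/*
 * Usage sketch (illustrative, not part of the driver): the command
 * shifted out above is the EEread opcode OR'ed with a word address,
 * clocked out MSB first over 9 bits, after which 16 data bits are
 * clocked back in. Assuming a word offset such as EEPROMMACAddr for the
 * station address, a caller could fetch the three MAC-address words with:
 *
 *	u16 mac[3];
 *	int i;
 *
 *	for (i = 0; i < 3; i++)
 *		mac[i] = read_eeprom(ioaddr, i + EEPROMMACAddr);
 */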
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) /* Read and write the MII management registers using software-generated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) serial MDIO protocol. Note that the command bits and data bits are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) sent out separately. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) #define mdio_delay() sr32(mear)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) static void mdio_idle(struct sis900_private *sp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) void __iomem *ioaddr = sp->ioaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) sw32(mear, MDIO | MDDIR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) mdio_delay();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) sw32(mear, MDIO | MDDIR | MDC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) /* Synchronize the MII management interface by shifting 32 one bits out. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) static void mdio_reset(struct sis900_private *sp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) void __iomem *ioaddr = sp->ioaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) for (i = 31; i >= 0; i--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) sw32(mear, MDDIR | MDIO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) mdio_delay();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) sw32(mear, MDDIR | MDIO | MDC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) mdio_delay();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) * mdio_read - read MII PHY register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) * @net_dev: the net device to read
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) * @phy_id: the phy address to read
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) * @location: the phy register id to read
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) * Read MII registers through MDIO and MDC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) * using the MDIO management frame structure and protocol (defined by ISO/IEC).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) * Please see the SiS 7014 or ICS spec.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) static int mdio_read(struct net_device *net_dev, int phy_id, int location)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) int mii_cmd = MIIread|(phy_id<<MIIpmdShift)|(location<<MIIregShift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) struct sis900_private *sp = netdev_priv(net_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) void __iomem *ioaddr = sp->ioaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) u16 retval = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) mdio_reset(sp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) mdio_idle(sp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) for (i = 15; i >= 0; i--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) int dataval = (mii_cmd & (1 << i)) ? MDDIR | MDIO : MDDIR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) sw32(mear, dataval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) mdio_delay();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) sw32(mear, dataval | MDC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) mdio_delay();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) /* Read the 16 data bits. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) for (i = 16; i > 0; i--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) sw32(mear, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) mdio_delay();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) retval = (retval << 1) | ((sr32(mear) & MDIO) ? 1 : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) sw32(mear, MDC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) mdio_delay();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) sw32(mear, 0x00);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) }
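
/*
 * Illustrative example: the 16-bit management frame built above packs the
 * read opcode, PHY address and register number into a single word that is
 * clocked out MSB first, after which 16 data bits are clocked back in.
 * Reading the status register of the PHY at address 2 would use
 *
 *	mii_cmd = MIIread | (2 << MIIpmdShift) | (MII_STATUS << MIIregShift);
 *
 * MII_STAT_LINK is a latching bit, which is presumably why callers in
 * this driver read MII_STATUS twice back to back to obtain the current
 * link state.
 */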
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) * mdio_write - write MII PHY register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) * @net_dev: the net device to write
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) * @phy_id: the phy address to write
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) * @location: the phy register id to write
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) * @value: the register value to write with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) * Write MII registers with @value through MDIO and MDC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) * using the MDIO management frame structure and protocol (defined by ISO/IEC).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) * Please see the SiS 7014 or ICS spec.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) static void mdio_write(struct net_device *net_dev, int phy_id, int location,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) int value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) int mii_cmd = MIIwrite|(phy_id<<MIIpmdShift)|(location<<MIIregShift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) struct sis900_private *sp = netdev_priv(net_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) void __iomem *ioaddr = sp->ioaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) mdio_reset(sp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) mdio_idle(sp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) /* Shift the command bits out. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) for (i = 15; i >= 0; i--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) int dataval = (mii_cmd & (1 << i)) ? MDDIR | MDIO : MDDIR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) sw8(mear, dataval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) mdio_delay();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) sw8(mear, dataval | MDC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) mdio_delay();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) mdio_delay();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) /* Shift the value bits out. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) for (i = 15; i >= 0; i--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) int dataval = (value & (1 << i)) ? MDDIR | MDIO : MDDIR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) sw32(mear, dataval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) mdio_delay();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) sw32(mear, dataval | MDC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) mdio_delay();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) mdio_delay();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) /* Clear out extra bits. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) for (i = 2; i > 0; i--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) sw8(mear, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) mdio_delay();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) sw8(mear, MDC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) mdio_delay();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) sw32(mear, 0x00);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) * sis900_reset_phy - reset sis900 mii phy.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) * @net_dev: the net device to write
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) * @phy_addr: default phy address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) * Some PHYs cannot work properly without a reset.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) * This function is called during initialization and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) * when the link status changes from ON to DOWN.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) static u16 sis900_reset_phy(struct net_device *net_dev, int phy_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) u16 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) for (i = 0; i < 2; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) status = mdio_read(net_dev, phy_addr, MII_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) mdio_write(net_dev, phy_addr, MII_CONTROL, MII_CNTL_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) #ifdef CONFIG_NET_POLL_CONTROLLER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) * Polling 'interrupt' - used by things like netconsole to send skbs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) * without having to re-enable interrupts. It's not called while
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) * the interrupt routine is executing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) static void sis900_poll(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) struct sis900_private *sp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) const int irq = sp->pci_dev->irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) disable_irq(irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) sis900_interrupt(irq, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) enable_irq(irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) * sis900_open - open sis900 device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) * @net_dev: the net device to open
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) * Do some initialization and start the net interface,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) * then enable interrupts and set the sis900 timer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) sis900_open(struct net_device *net_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) struct sis900_private *sis_priv = netdev_priv(net_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) void __iomem *ioaddr = sis_priv->ioaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) /* Soft reset the chip. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) sis900_reset(net_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) /* Equalizer workaround Rule */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) sis630_set_eq(net_dev, sis_priv->chipset_rev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) ret = request_irq(sis_priv->pci_dev->irq, sis900_interrupt, IRQF_SHARED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) net_dev->name, net_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) sis900_init_rxfilter(net_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) sis900_init_tx_ring(net_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) sis900_init_rx_ring(net_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) set_rx_mode(net_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) netif_start_queue(net_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) /* Workaround for EDB */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) sis900_set_mode(sis_priv, HW_SPEED_10_MBPS, FDX_CAPABLE_HALF_SELECTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) /* Enable all known interrupts by setting the interrupt mask. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxDESC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) sw32(cr, RxENA | sr32(cr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) sw32(ier, IE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) sis900_check_mode(net_dev, sis_priv->mii);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) /* Set the timer to check for link beat and perhaps switch
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) to an alternate media type. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) timer_setup(&sis_priv->timer, sis900_timer, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) sis_priv->timer.expires = jiffies + HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) add_timer(&sis_priv->timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) * sis900_init_rxfilter - Initialize the Rx filter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) * @net_dev: the net device to initialize for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) * Set receive filter address to our MAC address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) * and enable packet filtering.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) sis900_init_rxfilter(struct net_device *net_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) struct sis900_private *sis_priv = netdev_priv(net_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) void __iomem *ioaddr = sis_priv->ioaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) u32 rfcrSave;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) u32 i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) rfcrSave = sr32(rfcr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) /* disable packet filtering before setting filter */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) sw32(rfcr, rfcrSave & ~RFEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) /* load MAC addr to filter data register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) for (i = 0; i < 3; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) u32 w = (u32) *((u16 *)(net_dev->dev_addr)+i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) sw32(rfcr, i << RFADDR_shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) sw32(rfdr, w);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) if (netif_msg_hw(sis_priv)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) printk(KERN_DEBUG "%s: Receive Filter Address[%d]=%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) net_dev->name, i, sr32(rfdr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) /* enable packet filtering */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) sw32(rfcr, rfcrSave | RFEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) }
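
/*
 * Illustrative example: the loop above loads the 6-byte station address
 * into the receive filter as three 16-bit words taken directly from
 * dev_addr, so on a little-endian host the MAC address 00:11:22:33:44:55
 * is written as the words 0x1100, 0x3322 and 0x5544 into filter slots
 * 0, 1 and 2 (selected through RFADDR_shift).
 */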
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) * sis900_init_tx_ring - Initialize the Tx descriptor ring
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) * @net_dev: the net device to initialize for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) * Initialize the Tx descriptor ring.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) sis900_init_tx_ring(struct net_device *net_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) struct sis900_private *sis_priv = netdev_priv(net_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) void __iomem *ioaddr = sis_priv->ioaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) sis_priv->tx_full = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) sis_priv->dirty_tx = sis_priv->cur_tx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) for (i = 0; i < NUM_TX_DESC; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) sis_priv->tx_skbuff[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) sis_priv->tx_ring[i].link = sis_priv->tx_ring_dma +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) ((i+1)%NUM_TX_DESC)*sizeof(BufferDesc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) sis_priv->tx_ring[i].cmdsts = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) sis_priv->tx_ring[i].bufptr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) /* load Transmit Descriptor Register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) sw32(txdp, sis_priv->tx_ring_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) if (netif_msg_hw(sis_priv))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) printk(KERN_DEBUG "%s: TX descriptor register loaded with: %8.8x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) net_dev->name, sr32(txdp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) }
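
/*
 * Illustrative example: the link field computed above chains the
 * descriptors into a ring by bus address,
 *
 *	sis_priv->tx_ring[i].link = sis_priv->tx_ring_dma +
 *		((i + 1) % NUM_TX_DESC) * sizeof(BufferDesc);
 *
 * so the last descriptor's link wraps back to tx_ring_dma and the
 * hardware walks the ring circularly. The Rx ring below is linked the
 * same way.
 */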
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) * sis900_init_rx_ring - Initialize the Rx descriptor ring
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) * @net_dev: the net device to initialize for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) * Initialize the Rx descriptor ring,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) * and pre-allocate receive buffers (socket buffers).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) sis900_init_rx_ring(struct net_device *net_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) struct sis900_private *sis_priv = netdev_priv(net_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) void __iomem *ioaddr = sis_priv->ioaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) sis_priv->cur_rx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) sis_priv->dirty_rx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) /* init RX descriptor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) for (i = 0; i < NUM_RX_DESC; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) sis_priv->rx_skbuff[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) sis_priv->rx_ring[i].link = sis_priv->rx_ring_dma +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) ((i+1)%NUM_RX_DESC)*sizeof(BufferDesc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) sis_priv->rx_ring[i].cmdsts = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) sis_priv->rx_ring[i].bufptr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) /* allocate sock buffers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) for (i = 0; i < NUM_RX_DESC; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) if ((skb = netdev_alloc_skb(net_dev, RX_BUF_SIZE)) == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) /* not enough memory for skbuff; this leaves a "hole"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) in the buffer ring, and it is not clear how the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) hardware will react to this kind of degenerate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) sis_priv->rx_skbuff[i] = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) sis_priv->rx_ring[i].cmdsts = RX_BUF_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) sis_priv->rx_ring[i].bufptr = dma_map_single(&sis_priv->pci_dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) skb->data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) RX_BUF_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) if (unlikely(dma_mapping_error(&sis_priv->pci_dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) sis_priv->rx_ring[i].bufptr))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) dev_kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) sis_priv->rx_skbuff[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) sis_priv->dirty_rx = (unsigned int) (i - NUM_RX_DESC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) /* load Receive Descriptor Register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) sw32(rxdp, sis_priv->rx_ring_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) if (netif_msg_hw(sis_priv))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) printk(KERN_DEBUG "%s: RX descriptor register loaded with: %8.8x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) net_dev->name, sr32(rxdp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) }
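
/*
 * Worked example of the dirty_rx arithmetic above: if every one of the
 * NUM_RX_DESC allocations succeeds, the loop ends with i == NUM_RX_DESC
 * and dirty_rx becomes 0. If an allocation fails at i == NUM_RX_DESC - 2,
 * dirty_rx wraps to 0xFFFFFFFE, i.e. two below cur_rx (which is 0) modulo
 * 2^32; assuming the Rx refill path elsewhere compares cur_rx - dirty_rx,
 * this records that two ring slots still need buffers.
 */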
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) * sis630_set_eq - set phy equalizer value for 630 LAN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) * @net_dev: the net device to set equalizer value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) * @revision: 630 LAN revision number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) * 630E equalizer workaround rule (Cyrus Huang 08/15)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) * PHY register 14h(Test)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) * Bit 14: 0 -- Automatically detect (default)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) * 1 -- Manually set Equalizer filter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) * Bit 13: 0 -- (Default)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) * 1 -- Speed up convergence of equalizer setting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) * Bit 9 : 0 -- (Default)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) * 1 -- Disable Baseline Wander
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) * Bit 3~7 -- Equalizer filter setting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) * Link ON: Set Bit 9, 13 to 1, Bit 14 to 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) * Then calculate equalizer value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) * Then set equalizer value, and set Bit 14 to 1, Bit 9 to 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) * Link Off: Set Bit 13 to 1, Bit 14 to 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) * Calculate Equalizer value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) * When Link is ON and Bit 14 is 0, the SiS 900 PHY will auto-detect the proper equalizer value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) * When the equalizer is stable, this value is not fixed. It stays within
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) * a small range (e.g. 7~9). Then we get a minimum and a maximum value (e.g. min=7, max=9).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) * 0 <= max <= 4 --> set equalizer to max
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) * 5 <= max <= 14 --> set equalizer to max+1 or set equalizer to max+2 if max == min
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) * max >= 15 --> set equalizer to max+5 or set equalizer to max+6 if max == min
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) static void sis630_set_eq(struct net_device *net_dev, u8 revision)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) struct sis900_private *sis_priv = netdev_priv(net_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) u16 reg14h, eq_value=0, max_value=0, min_value=0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) int i, maxcount=10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) if (!(revision == SIS630E_900_REV || revision == SIS630EA1_900_REV ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) revision == SIS630A_900_REV || revision == SIS630ET_900_REV))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) if (netif_carrier_ok(net_dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) reg14h = mdio_read(net_dev, sis_priv->cur_phy, MII_RESV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) mdio_write(net_dev, sis_priv->cur_phy, MII_RESV,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) (0x2200 | reg14h) & 0xBFFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) for (i = 0; i < maxcount; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) eq_value = (0x00F8 & mdio_read(net_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) sis_priv->cur_phy, MII_RESV)) >> 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) if (i == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) max_value = min_value = eq_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) max_value = (eq_value > max_value) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) eq_value : max_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) min_value = (eq_value < min_value) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) eq_value : min_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) /* 630E rule to determine the equalizer value */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) if (revision == SIS630E_900_REV || revision == SIS630EA1_900_REV ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) revision == SIS630ET_900_REV) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) if (max_value < 5)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) eq_value = max_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) else if (max_value >= 5 && max_value < 15)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) eq_value = (max_value == min_value) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) max_value+2 : max_value+1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) else if (max_value >= 15)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) eq_value = (max_value == min_value) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) max_value+6 : max_value+5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) /* 630B0&B1 rule to determine the equalizer value */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) if (revision == SIS630A_900_REV &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) (sis_priv->host_bridge_rev == SIS630B0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) sis_priv->host_bridge_rev == SIS630B1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) if (max_value == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) eq_value = 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) eq_value = (max_value + min_value + 1)/2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) /* write equalizer value and setting */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) reg14h = mdio_read(net_dev, sis_priv->cur_phy, MII_RESV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) reg14h = (reg14h & 0xFF07) | ((eq_value << 3) & 0x00F8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) reg14h = (reg14h | 0x6000) & 0xFDFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) mdio_write(net_dev, sis_priv->cur_phy, MII_RESV, reg14h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) reg14h = mdio_read(net_dev, sis_priv->cur_phy, MII_RESV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) if (revision == SIS630A_900_REV &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) (sis_priv->host_bridge_rev == SIS630B0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) sis_priv->host_bridge_rev == SIS630B1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) mdio_write(net_dev, sis_priv->cur_phy, MII_RESV,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) (reg14h | 0x2200) & 0xBFFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) mdio_write(net_dev, sis_priv->cur_phy, MII_RESV,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) (reg14h | 0x2000) & 0xBFFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) }
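
/*
 * Worked example of the 630E rule documented above: if the ten samples of
 * PHY register 14h bits [7:3] range from min_value = 7 to max_value = 9,
 * then 5 <= max_value < 15 and max_value != min_value, so eq_value
 * becomes max_value + 1 = 10. The final write packs 10 back into bits
 * [7:3], sets bits 14 and 13 (manual equalizer setting, faster
 * convergence) and clears bit 9, via the 0xFF07/0x00F8 and 0x6000/0xFDFF
 * masks.
 */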
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) * sis900_timer - sis900 timer routine
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) * @t: timer list containing a pointer to sis900 net device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) * On each timer tick we check two things:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) * link status (ON/OFF) and link mode (10/100, Full/Half).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) static void sis900_timer(struct timer_list *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) struct sis900_private *sis_priv = from_timer(sis_priv, t, timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) struct net_device *net_dev = sis_priv->mii_info.dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) struct mii_phy *mii_phy = sis_priv->mii;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) static const int next_tick = 5*HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) int speed = 0, duplex = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) u16 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) status = mdio_read(net_dev, sis_priv->cur_phy, MII_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) status = mdio_read(net_dev, sis_priv->cur_phy, MII_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) /* Link OFF -> ON */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) if (!netif_carrier_ok(net_dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) LookForLink:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) /* Search for new PHY */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) status = sis900_default_phy(net_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) mii_phy = sis_priv->mii;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) if (status & MII_STAT_LINK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) WARN_ON(!(status & MII_STAT_AUTO_DONE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) sis900_read_mode(net_dev, &speed, &duplex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) if (duplex) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) sis900_set_mode(sis_priv, speed, duplex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) sis630_set_eq(net_dev, sis_priv->chipset_rev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) netif_carrier_on(net_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) /* Link ON -> OFF */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) if (!(status & MII_STAT_LINK)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) netif_carrier_off(net_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) if (netif_msg_link(sis_priv))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) printk(KERN_INFO "%s: Media Link Off\n", net_dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) /* Change mode issue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) if ((mii_phy->phy_id0 == 0x001D) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) ((mii_phy->phy_id1 & 0xFFF0) == 0x8000))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) sis900_reset_phy(net_dev, sis_priv->cur_phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) sis630_set_eq(net_dev, sis_priv->chipset_rev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) goto LookForLink;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) sis_priv->timer.expires = jiffies + next_tick;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) add_timer(&sis_priv->timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) * sis900_check_mode - check the media mode for sis900
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) * @net_dev: the net device to be checked
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) * @mii_phy: the mii phy
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) * The older driver got the media mode from the MII status output
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) * register. Now we set our media capability and auto-negotiate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) * to get the upper bound of speed and duplex between the two ends.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) * If the MII PHY type is HOME, it doesn't need to auto-negotiate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) * and autong_complete should be set to 1.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) static void sis900_check_mode(struct net_device *net_dev, struct mii_phy *mii_phy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) struct sis900_private *sis_priv = netdev_priv(net_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) void __iomem *ioaddr = sis_priv->ioaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) int speed, duplex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) if (mii_phy->phy_types == LAN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) sw32(cfg, ~EXD & sr32(cfg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) sis900_set_capability(net_dev, mii_phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) sis900_auto_negotiate(net_dev, sis_priv->cur_phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) sw32(cfg, EXD | sr32(cfg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) speed = HW_SPEED_HOME;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) duplex = FDX_CAPABLE_HALF_SELECTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) sis900_set_mode(sis_priv, speed, duplex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) sis_priv->autong_complete = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) * sis900_set_mode - Set the media mode of mac register.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) * @sp: the device private data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) * @speed: the transmit speed to be determined
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) * @duplex: the duplex mode to be determined
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) * Set the media mode of the mac registers txcfg/rxcfg according to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) * speed and duplex of the PHY. Bit EDB_MASTER_EN indicates that the EDB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) * bus is used instead of the PCI bus. When this bit is set to 1, the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) * max DMA burst size for TX/RX DMA should be no larger than 16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) * double words.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) static void sis900_set_mode(struct sis900_private *sp, int speed, int duplex)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) void __iomem *ioaddr = sp->ioaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) u32 tx_flags = 0, rx_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) if (sr32(cfg) & EDB_MASTER_EN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) tx_flags = TxATP | (DMA_BURST_64 << TxMXDMA_shift) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) (TX_FILL_THRESH << TxFILLT_shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) rx_flags = DMA_BURST_64 << RxMXDMA_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) tx_flags = TxATP | (DMA_BURST_512 << TxMXDMA_shift) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) (TX_FILL_THRESH << TxFILLT_shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) rx_flags = DMA_BURST_512 << RxMXDMA_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) if (speed == HW_SPEED_HOME || speed == HW_SPEED_10_MBPS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) rx_flags |= (RxDRNT_10 << RxDRNT_shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) tx_flags |= (TxDRNT_10 << TxDRNT_shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) rx_flags |= (RxDRNT_100 << RxDRNT_shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) tx_flags |= (TxDRNT_100 << TxDRNT_shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) if (duplex == FDX_CAPABLE_FULL_SELECTED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) tx_flags |= (TxCSI | TxHBI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) rx_flags |= RxATX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) #if IS_ENABLED(CONFIG_VLAN_8021Q)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) /* Can accept Jumbo packet */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) rx_flags |= RxAJAB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) sw32(txcfg, tx_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) sw32(rxcfg, rx_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) }
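
/*
 * Illustrative example: for a non-EDB (PCI) part that negotiated
 * 100 Mbps full duplex, the values composed above are
 *
 *	tx_flags = TxATP | (DMA_BURST_512 << TxMXDMA_shift) |
 *		   (TX_FILL_THRESH << TxFILLT_shift) |
 *		   (TxDRNT_100 << TxDRNT_shift) | TxCSI | TxHBI;
 *	rx_flags = (DMA_BURST_512 << RxMXDMA_shift) |
 *		   (RxDRNT_100 << RxDRNT_shift) | RxATX;
 *
 * plus RxAJAB when VLAN support is built in, before being written to
 * txcfg and rxcfg.
 */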
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) * sis900_auto_negotiate - Set the Auto-Negotiation Enable/Reset bit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) * @net_dev: the net device to read mode for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) * @phy_addr: mii phy address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) * If the adapter has an active link, set the auto-negotiation enable/reset bit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) * autong_complete should be set to 0 when starting auto-negotiation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) * autong_complete should be set to 1 if we didn't start auto-negotiation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) * sis900_timer will wait for link on again if autong_complete = 0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) static void sis900_auto_negotiate(struct net_device *net_dev, int phy_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) struct sis900_private *sis_priv = netdev_priv(net_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) int i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) u32 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) for (i = 0; i < 2; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) status = mdio_read(net_dev, phy_addr, MII_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) if (!(status & MII_STAT_LINK)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) if (netif_msg_link(sis_priv))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) printk(KERN_INFO "%s: Media Link Off\n", net_dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) sis_priv->autong_complete = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) netif_carrier_off(net_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) /* (Re)start AutoNegotiate */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) mdio_write(net_dev, phy_addr, MII_CONTROL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) MII_CNTL_AUTO | MII_CNTL_RST_AUTO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) sis_priv->autong_complete = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) * sis900_read_mode - read media mode for sis900 internal phy
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) * @net_dev: the net device to read mode for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) * @speed: the transmit speed to be determined
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) * @duplex: the duplex mode to be determined
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) * The capability of the remote end will be put in the MII register autorec
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) * after auto-negotiation. Use an AND operation to get the upper bound
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) * of speed and duplex between the two ends.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) static void sis900_read_mode(struct net_device *net_dev, int *speed, int *duplex)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) struct sis900_private *sis_priv = netdev_priv(net_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) struct mii_phy *phy = sis_priv->mii;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) int phy_addr = sis_priv->cur_phy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) u32 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) u16 autoadv, autorec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) for (i = 0; i < 2; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) status = mdio_read(net_dev, phy_addr, MII_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) if (!(status & MII_STAT_LINK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) /* AutoNegotiate completed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) autoadv = mdio_read(net_dev, phy_addr, MII_ANADV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) autorec = mdio_read(net_dev, phy_addr, MII_ANLPAR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) status = autoadv & autorec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) *speed = HW_SPEED_10_MBPS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) *duplex = FDX_CAPABLE_HALF_SELECTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) if (status & (MII_NWAY_TX | MII_NWAY_TX_FDX))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) *speed = HW_SPEED_100_MBPS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) if (status & (MII_NWAY_TX_FDX | MII_NWAY_T_FDX))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) *duplex = FDX_CAPABLE_FULL_SELECTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) sis_priv->autong_complete = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) /* Workaround for Realtek RTL8201 PHY issue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) if ((phy->phy_id0 == 0x0000) && ((phy->phy_id1 & 0xFFF0) == 0x8200)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) if (mdio_read(net_dev, phy_addr, MII_CONTROL) & MII_CNTL_FDX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) *duplex = FDX_CAPABLE_FULL_SELECTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) if (mdio_read(net_dev, phy_addr, 0x0019) & 0x01)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) *speed = HW_SPEED_100_MBPS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) if (netif_msg_link(sis_priv))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) printk(KERN_INFO "%s: Media Link On %s %s-duplex\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) net_dev->name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) *speed == HW_SPEED_100_MBPS ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) "100mbps" : "10mbps",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) *duplex == FDX_CAPABLE_FULL_SELECTED ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) "full" : "half");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) * sis900_tx_timeout - sis900 transmit timeout routine
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) * @net_dev: the net device to transmit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) * @txqueue: index of hanging queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) * Print the transmit timeout status, disable interrupts,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) * discard any unsent packets and restart the transmitter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) static void sis900_tx_timeout(struct net_device *net_dev, unsigned int txqueue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) struct sis900_private *sis_priv = netdev_priv(net_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) void __iomem *ioaddr = sis_priv->ioaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) if (netif_msg_tx_err(sis_priv)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) printk(KERN_INFO "%s: Transmit timeout, status %8.8x %8.8x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) net_dev->name, sr32(cr), sr32(isr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) /* Disable interrupts by clearing the interrupt mask. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) sw32(imr, 0x0000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) /* use spinlock to prevent interrupt handler accessing buffer ring */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) spin_lock_irqsave(&sis_priv->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) /* discard unsent packets */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) sis_priv->dirty_tx = sis_priv->cur_tx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) for (i = 0; i < NUM_TX_DESC; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) struct sk_buff *skb = sis_priv->tx_skbuff[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) if (skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) dma_unmap_single(&sis_priv->pci_dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) sis_priv->tx_ring[i].bufptr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) skb->len, DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) dev_kfree_skb_irq(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) sis_priv->tx_skbuff[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) sis_priv->tx_ring[i].cmdsts = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) sis_priv->tx_ring[i].bufptr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) net_dev->stats.tx_dropped++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) sis_priv->tx_full = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) netif_wake_queue(net_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) spin_unlock_irqrestore(&sis_priv->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) netif_trans_update(net_dev); /* prevent tx timeout */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) /* load Transmit Descriptor Register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) sw32(txdp, sis_priv->tx_ring_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) /* Enable all known interrupts by setting the interrupt mask. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxDESC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) * sis900_start_xmit - sis900 start transmit routine
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) * @skb: socket buffer pointer to put the data being transmitted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) * @net_dev: the net device to transmit with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) * Set the transmit buffer descriptor,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) * and write TxENA to enable the transmit state machine.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) * Tell the upper layer if the Tx ring is full.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) static netdev_tx_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) sis900_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) struct sis900_private *sis_priv = netdev_priv(net_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) void __iomem *ioaddr = sis_priv->ioaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) unsigned int entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) unsigned int index_cur_tx, index_dirty_tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) unsigned int count_dirty_tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) spin_lock_irqsave(&sis_priv->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) /* Calculate the next Tx descriptor entry. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) entry = sis_priv->cur_tx % NUM_TX_DESC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) sis_priv->tx_skbuff[entry] = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) /* set the transmit buffer descriptor and enable Transmit State Machine */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) sis_priv->tx_ring[entry].bufptr = dma_map_single(&sis_priv->pci_dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) skb->data, skb->len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) if (unlikely(dma_mapping_error(&sis_priv->pci_dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) sis_priv->tx_ring[entry].bufptr))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) dev_kfree_skb_any(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) sis_priv->tx_skbuff[entry] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) net_dev->stats.tx_dropped++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) spin_unlock_irqrestore(&sis_priv->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) return NETDEV_TX_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) }
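	/* Hand the descriptor to the NIC: OWN transfers ownership to the
	 * hardware, INTR requests a Tx-complete interrupt and the low bits
	 * carry the buffer length. */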
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) sis_priv->tx_ring[entry].cmdsts = (OWN | INTR | skb->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) sw32(cr, TxENA | sr32(cr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) sis_priv->cur_tx ++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) index_cur_tx = sis_priv->cur_tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) index_dirty_tx = sis_priv->dirty_tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) for (count_dirty_tx = 0; index_cur_tx != index_dirty_tx; index_dirty_tx++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) count_dirty_tx ++;
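	/* count_dirty_tx now equals cur_tx - dirty_tx, i.e. the number of
	 * descriptors queued but not yet reclaimed by sis900_finish_xmit(). */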
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) if (index_cur_tx == index_dirty_tx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) /* dirty_tx is met in the cycle of cur_tx, buffer full */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) sis_priv->tx_full = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) netif_stop_queue(net_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) } else if (count_dirty_tx < NUM_TX_DESC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) /* Typical path, tell upper layer that more transmission is possible */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) netif_start_queue(net_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) /* buffer full, tell upper layer no more transmission */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) sis_priv->tx_full = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) netif_stop_queue(net_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) spin_unlock_irqrestore(&sis_priv->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) if (netif_msg_tx_queued(sis_priv))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) printk(KERN_DEBUG "%s: Queued Tx packet at %p size %d "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) "to slot %d.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) net_dev->name, skb->data, (int)skb->len, entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) return NETDEV_TX_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) * sis900_interrupt - sis900 interrupt handler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) * @irq: the irq number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) * @dev_instance: the client data object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) * The interrupt handler does all of the Rx thread work,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) * and cleans up after the Tx thread
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) static irqreturn_t sis900_interrupt(int irq, void *dev_instance)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) struct net_device *net_dev = dev_instance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) struct sis900_private *sis_priv = netdev_priv(net_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) int boguscnt = max_interrupt_work;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) void __iomem *ioaddr = sis_priv->ioaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) u32 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) unsigned int handled = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) spin_lock (&sis_priv->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) status = sr32(isr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) if ((status & (HIBERR|TxURN|TxERR|TxDESC|RxORN|RxERR|RxOK)) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) /* nothing interesting happened */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) handled = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) /* why don't we break after the Tx/Rx case? keyword: full-duplex */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) if (status & (RxORN | RxERR | RxOK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) /* Rx interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) sis900_rx(net_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) if (status & (TxURN | TxERR | TxDESC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) /* Tx interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) sis900_finish_xmit(net_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) /* something strange happened !!! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) if (status & HIBERR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) if(netif_msg_intr(sis_priv))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) printk(KERN_INFO "%s: Abnormal interrupt, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) "status %#8.8x.\n", net_dev->name, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) if (--boguscnt < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) if(netif_msg_intr(sis_priv))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) printk(KERN_INFO "%s: Too much work at interrupt, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) "interrupt status = %#8.8x.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) net_dev->name, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) } while (1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) if(netif_msg_intr(sis_priv))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) printk(KERN_DEBUG "%s: exiting interrupt, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) "interrupt status = %#8.8x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) net_dev->name, sr32(isr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) spin_unlock (&sis_priv->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) return IRQ_RETVAL(handled);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) * sis900_rx - sis900 receive routine
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) * @net_dev: the net device which receives data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) * Process receive interrupt events,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) * pass buffers to the higher layer and refill the buffer pool.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) * Note: this function is called from the interrupt handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) * so don't do "too much" work here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) static int sis900_rx(struct net_device *net_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) struct sis900_private *sis_priv = netdev_priv(net_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) void __iomem *ioaddr = sis_priv->ioaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) unsigned int entry = sis_priv->cur_rx % NUM_RX_DESC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) u32 rx_status = sis_priv->rx_ring[entry].cmdsts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) int rx_work_limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) if (netif_msg_rx_status(sis_priv))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) printk(KERN_DEBUG "sis900_rx, cur_rx:%4.4d, dirty_rx:%4.4d "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) "status:0x%8.8x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) sis_priv->cur_rx, sis_priv->dirty_rx, rx_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) rx_work_limit = sis_priv->dirty_rx + NUM_RX_DESC - sis_priv->cur_rx;
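	/* Bound the work done in one call: at most NUM_RX_DESC minus the
	 * entries that are still waiting to be refilled. */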
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) while (rx_status & OWN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) unsigned int rx_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) unsigned int data_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) if (--rx_work_limit < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) data_size = rx_status & DSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) rx_size = data_size - CRC_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) #if IS_ENABLED(CONFIG_VLAN_8021Q)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) /* ``TOOLONG'' flag means jumbo packet received. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) if ((rx_status & TOOLONG) && data_size <= MAX_FRAME_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) rx_status &= (~ ((unsigned int)TOOLONG));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) if (rx_status & (ABORT|OVERRUN|TOOLONG|RUNT|RXISERR|CRCERR|FAERR)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) /* corrupted packet received */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) if (netif_msg_rx_err(sis_priv))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) printk(KERN_DEBUG "%s: Corrupted packet "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) "received, buffer status = 0x%8.8x/%d.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) net_dev->name, rx_status, data_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) net_dev->stats.rx_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) if (rx_status & OVERRUN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) net_dev->stats.rx_over_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) if (rx_status & (TOOLONG|RUNT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) net_dev->stats.rx_length_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) if (rx_status & (RXISERR | FAERR))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) net_dev->stats.rx_frame_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) if (rx_status & CRCERR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) net_dev->stats.rx_crc_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) /* reset buffer descriptor state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) sis_priv->rx_ring[entry].cmdsts = RX_BUF_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) struct sk_buff * skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) struct sk_buff * rx_skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) dma_unmap_single(&sis_priv->pci_dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) sis_priv->rx_ring[entry].bufptr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) RX_BUF_SIZE, DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) /* refill the Rx buffer; if allocating a new socket buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) * fails, recycle the old one below */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) if ((skb = netdev_alloc_skb(net_dev, RX_BUF_SIZE)) == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) * Not enough memory to refill the buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) * so we need to recycle the old one so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) * as to avoid creating a memory hole
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) * in the rx ring
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) skb = sis_priv->rx_skbuff[entry];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) net_dev->stats.rx_dropped++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) goto refill_rx_ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) /* This situation should never happen, but due to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) some unknown bugs, it is possible that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) we end up working on a NULL sk_buff :-( */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) if (sis_priv->rx_skbuff[entry] == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) if (netif_msg_rx_err(sis_priv))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) printk(KERN_WARNING "%s: NULL pointer "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) "encountered in Rx ring\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) "cur_rx:%4.4d, dirty_rx:%4.4d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) net_dev->name, sis_priv->cur_rx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) sis_priv->dirty_rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) dev_kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) /* give the socket buffer to upper layers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) rx_skb = sis_priv->rx_skbuff[entry];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) skb_put(rx_skb, rx_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) rx_skb->protocol = eth_type_trans(rx_skb, net_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) netif_rx(rx_skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) /* some network statistics */
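			/* the BCAST field of the Rx status encodes the
			 * destination address class; the MCAST value marks
			 * a multicast frame */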
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) if ((rx_status & BCAST) == MCAST)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) net_dev->stats.multicast++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) net_dev->stats.rx_bytes += rx_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) net_dev->stats.rx_packets++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) sis_priv->dirty_rx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) refill_rx_ring:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) sis_priv->rx_skbuff[entry] = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) sis_priv->rx_ring[entry].cmdsts = RX_BUF_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) sis_priv->rx_ring[entry].bufptr =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) dma_map_single(&sis_priv->pci_dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) skb->data, RX_BUF_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) if (unlikely(dma_mapping_error(&sis_priv->pci_dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) sis_priv->rx_ring[entry].bufptr))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) dev_kfree_skb_irq(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) sis_priv->rx_skbuff[entry] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) sis_priv->cur_rx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) entry = sis_priv->cur_rx % NUM_RX_DESC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) rx_status = sis_priv->rx_ring[entry].cmdsts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) } // while
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) /* refill any Rx entries that are still missing a buffer because an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) * earlier allocation or DMA mapping attempt failed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) for (; sis_priv->cur_rx != sis_priv->dirty_rx; sis_priv->dirty_rx++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) entry = sis_priv->dirty_rx % NUM_RX_DESC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) if (sis_priv->rx_skbuff[entry] == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) skb = netdev_alloc_skb(net_dev, RX_BUF_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) if (skb == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) /* not enough memory for a skbuff; this leaves a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) * "hole" in the buffer ring, and it is not clear
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) * how the hardware will react to this kind
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) * of degenerate buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) net_dev->stats.rx_dropped++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) sis_priv->rx_skbuff[entry] = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) sis_priv->rx_ring[entry].cmdsts = RX_BUF_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) sis_priv->rx_ring[entry].bufptr =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) dma_map_single(&sis_priv->pci_dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) skb->data, RX_BUF_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) if (unlikely(dma_mapping_error(&sis_priv->pci_dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) sis_priv->rx_ring[entry].bufptr))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) dev_kfree_skb_irq(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) sis_priv->rx_skbuff[entry] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) /* re-enable the potentially idle receive state machine */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) sw32(cr , RxENA | sr32(cr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) * sis900_finish_xmit - finish up transmission of packets
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) * @net_dev: the net device to be transmitted on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) * Check for error conditions, free the socket buffers, etc.,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) * and schedule more transmission as needed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) * Note: this function is called from the interrupt handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) * so don't do "too much" work here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) static void sis900_finish_xmit (struct net_device *net_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) struct sis900_private *sis_priv = netdev_priv(net_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) for (; sis_priv->dirty_tx != sis_priv->cur_tx; sis_priv->dirty_tx++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) unsigned int entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) u32 tx_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) entry = sis_priv->dirty_tx % NUM_TX_DESC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) tx_status = sis_priv->tx_ring[entry].cmdsts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) if (tx_status & OWN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) /* The packet has not been transmitted yet (still owned by hardware)!
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) * Note: this is an almost impossible condition
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) * on TxDESC interrupt ('descriptor interrupt') */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) if (tx_status & (ABORT | UNDERRUN | OWCOLL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) /* packet unsuccessfully transmitted */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) if (netif_msg_tx_err(sis_priv))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) printk(KERN_DEBUG "%s: Transmit "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) "error, Tx status %8.8x.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) net_dev->name, tx_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) net_dev->stats.tx_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) if (tx_status & UNDERRUN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) net_dev->stats.tx_fifo_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) if (tx_status & ABORT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) net_dev->stats.tx_aborted_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) if (tx_status & NOCARRIER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) net_dev->stats.tx_carrier_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) if (tx_status & OWCOLL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) net_dev->stats.tx_window_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) /* packet successfully transmitted */
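			/* the per-packet collision count is stored in the
			 * upper half of the status word (COLCNT field) */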
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) net_dev->stats.collisions += (tx_status & COLCNT) >> 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) net_dev->stats.tx_bytes += tx_status & DSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) net_dev->stats.tx_packets++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) /* Free the original skb. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) skb = sis_priv->tx_skbuff[entry];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) dma_unmap_single(&sis_priv->pci_dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) sis_priv->tx_ring[entry].bufptr, skb->len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) dev_consume_skb_irq(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) sis_priv->tx_skbuff[entry] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) sis_priv->tx_ring[entry].bufptr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) sis_priv->tx_ring[entry].cmdsts = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) if (sis_priv->tx_full && netif_queue_stopped(net_dev) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) sis_priv->cur_tx - sis_priv->dirty_tx < NUM_TX_DESC - 4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) /* The ring is no longer full, clear tx_full and schedule
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) * more transmission by netif_wake_queue(net_dev) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) sis_priv->tx_full = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) netif_wake_queue (net_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) * sis900_close - close sis900 device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) * @net_dev: the net device to be closed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) * Disable interrupts, stop the Tx and Rx state machines,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) * and free the Tx and Rx socket buffers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) static int sis900_close(struct net_device *net_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) struct sis900_private *sis_priv = netdev_priv(net_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) struct pci_dev *pdev = sis_priv->pci_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) void __iomem *ioaddr = sis_priv->ioaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) netif_stop_queue(net_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) /* Disable interrupts by clearing the interrupt mask. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) sw32(imr, 0x0000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) sw32(ier, 0x0000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) /* Stop the chip's Tx and Rx Status Machine */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) sw32(cr, RxDIS | TxDIS | sr32(cr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) del_timer(&sis_priv->timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) free_irq(pdev->irq, net_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) /* Free Tx and RX skbuff */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) for (i = 0; i < NUM_RX_DESC; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) skb = sis_priv->rx_skbuff[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) if (skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) dma_unmap_single(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) sis_priv->rx_ring[i].bufptr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) RX_BUF_SIZE, DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) dev_kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) sis_priv->rx_skbuff[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) for (i = 0; i < NUM_TX_DESC; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) skb = sis_priv->tx_skbuff[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) if (skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) dma_unmap_single(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) sis_priv->tx_ring[i].bufptr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) skb->len, DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) dev_kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) sis_priv->tx_skbuff[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) /* Green! Put the chip in low-power mode. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) * sis900_get_drvinfo - Return information about driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) * @net_dev: the net device to probe
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) * @info: container for info returned
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) * Process ethtool commands such as "ethtool -i" to show driver information.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) static void sis900_get_drvinfo(struct net_device *net_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) struct ethtool_drvinfo *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) struct sis900_private *sis_priv = netdev_priv(net_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) strlcpy(info->driver, SIS900_MODULE_NAME, sizeof(info->driver));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) strlcpy(info->version, SIS900_DRV_VERSION, sizeof(info->version));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) strlcpy(info->bus_info, pci_name(sis_priv->pci_dev),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) sizeof(info->bus_info));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) }
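
/*
 * Illustrative use from user space: "ethtool -i <iface>" prints the
 * driver, version and bus-info fields filled in above (SIS900_MODULE_NAME,
 * SIS900_DRV_VERSION and the PCI slot name respectively).
 */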
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) static u32 sis900_get_msglevel(struct net_device *net_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) struct sis900_private *sis_priv = netdev_priv(net_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) return sis_priv->msg_enable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) static void sis900_set_msglevel(struct net_device *net_dev, u32 value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) struct sis900_private *sis_priv = netdev_priv(net_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) sis_priv->msg_enable = value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) static u32 sis900_get_link(struct net_device *net_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) struct sis900_private *sis_priv = netdev_priv(net_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) return mii_link_ok(&sis_priv->mii_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) static int sis900_get_link_ksettings(struct net_device *net_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) struct ethtool_link_ksettings *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) struct sis900_private *sis_priv = netdev_priv(net_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) spin_lock_irq(&sis_priv->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) mii_ethtool_get_link_ksettings(&sis_priv->mii_info, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) spin_unlock_irq(&sis_priv->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) static int sis900_set_link_ksettings(struct net_device *net_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) const struct ethtool_link_ksettings *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) struct sis900_private *sis_priv = netdev_priv(net_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) int rt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) spin_lock_irq(&sis_priv->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) rt = mii_ethtool_set_link_ksettings(&sis_priv->mii_info, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) spin_unlock_irq(&sis_priv->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) return rt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) static int sis900_nway_reset(struct net_device *net_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) struct sis900_private *sis_priv = netdev_priv(net_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) return mii_nway_restart(&sis_priv->mii_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) * sis900_set_wol - Set up Wake on Lan registers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) * @net_dev: the net device to probe
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) * @wol: container for info passed to the driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) * Process the ethtool "wol" command to set up Wake-on-LAN features.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) * SiS900 supports sending WoL events if a correct packet is received,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) * but there is no simple way to filter them to only a subset (broadcast,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) * multicast, unicast or arp).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) static int sis900_set_wol(struct net_device *net_dev, struct ethtool_wolinfo *wol)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) struct sis900_private *sis_priv = netdev_priv(net_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) void __iomem *ioaddr = sis_priv->ioaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) u32 cfgpmcsr = 0, pmctrl_bits = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) if (wol->wolopts == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) pci_read_config_dword(sis_priv->pci_dev, CFGPMCSR, &cfgpmcsr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) cfgpmcsr &= ~PME_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) pci_write_config_dword(sis_priv->pci_dev, CFGPMCSR, cfgpmcsr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) sw32(pmctrl, pmctrl_bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) if (netif_msg_wol(sis_priv))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) printk(KERN_DEBUG "%s: Wake on LAN disabled\n", net_dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) if (wol->wolopts & (WAKE_MAGICSECURE | WAKE_UCAST | WAKE_MCAST
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) | WAKE_BCAST | WAKE_ARP))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) if (wol->wolopts & WAKE_MAGIC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) pmctrl_bits |= MAGICPKT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) if (wol->wolopts & WAKE_PHY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) pmctrl_bits |= LINKON;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) sw32(pmctrl, pmctrl_bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) pci_read_config_dword(sis_priv->pci_dev, CFGPMCSR, &cfgpmcsr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) cfgpmcsr |= PME_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) pci_write_config_dword(sis_priv->pci_dev, CFGPMCSR, cfgpmcsr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) if (netif_msg_wol(sis_priv))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) printk(KERN_DEBUG "%s: Wake on LAN enabled\n", net_dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) }
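
/*
 * Illustrative ethtool usage (the interface name is an example):
 *
 *   # ethtool -s eth0 wol g    wake on magic packet
 *   # ethtool -s eth0 wol p    wake on PHY/link change
 *   # ethtool -s eth0 wol d    disable Wake on LAN
 *
 * Requesting any of the unsupported wake types (u/m/b/a/s) is rejected
 * with -EINVAL above.
 */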
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) static void sis900_get_wol(struct net_device *net_dev, struct ethtool_wolinfo *wol)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) struct sis900_private *sp = netdev_priv(net_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) void __iomem *ioaddr = sp->ioaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) u32 pmctrl_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) pmctrl_bits = sr32(pmctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) if (pmctrl_bits & MAGICPKT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) wol->wolopts |= WAKE_MAGIC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) if (pmctrl_bits & LINKON)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) wol->wolopts |= WAKE_PHY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) wol->supported = (WAKE_PHY | WAKE_MAGIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) static int sis900_get_eeprom_len(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) struct sis900_private *sis_priv = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) return sis_priv->eeprom_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) static int sis900_read_eeprom(struct net_device *net_dev, u8 *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) struct sis900_private *sis_priv = netdev_priv(net_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) void __iomem *ioaddr = sis_priv->ioaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) int wait, ret = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) u16 signature;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) u16 *ebuf = (u16 *)buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) if (sis_priv->chipset_rev == SIS96x_900_REV) {
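		/* On SiS96x parts the EEPROM is shared, so arbitrate for it:
		 * request access (EEREQ), poll for the grant (EEGNT) for up
		 * to ~2 ms, and release it with EEDONE when finished. */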
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) sw32(mear, EEREQ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) for (wait = 0; wait < 2000; wait++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) if (sr32(mear) & EEGNT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) /* read 16 bits, and index by 16 bits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) for (i = 0; i < sis_priv->eeprom_size / 2; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) ebuf[i] = (u16)read_eeprom(ioaddr, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) udelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) sw32(mear, EEDONE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) signature = (u16)read_eeprom(ioaddr, EEPROMSignature);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) if (signature != 0xffff && signature != 0x0000) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) /* read 16 bits, and index by 16 bits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) for (i = 0; i < sis_priv->eeprom_size / 2; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) ebuf[i] = (u16)read_eeprom(ioaddr, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) #define SIS900_EEPROM_MAGIC 0xBABE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) static int sis900_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) struct sis900_private *sis_priv = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) u8 *eebuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) int res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) eebuf = kmalloc(sis_priv->eeprom_size, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) if (!eebuf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) eeprom->magic = SIS900_EEPROM_MAGIC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) spin_lock_irq(&sis_priv->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) res = sis900_read_eeprom(dev, eebuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) spin_unlock_irq(&sis_priv->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) if (!res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) memcpy(data, eebuf + eeprom->offset, eeprom->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) kfree(eebuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) return res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) static const struct ethtool_ops sis900_ethtool_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) .get_drvinfo = sis900_get_drvinfo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) .get_msglevel = sis900_get_msglevel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) .set_msglevel = sis900_set_msglevel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) .get_link = sis900_get_link,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) .nway_reset = sis900_nway_reset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) .get_wol = sis900_get_wol,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) .set_wol = sis900_set_wol,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) .get_link_ksettings = sis900_get_link_ksettings,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) .set_link_ksettings = sis900_set_link_ksettings,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) .get_eeprom_len = sis900_get_eeprom_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) .get_eeprom = sis900_get_eeprom,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) * mii_ioctl - process MII i/o control command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) * @net_dev: the net device to command for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) * @rq: parameter for command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) * @cmd: the i/o command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) * Process MII command like read/write MII register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) static int mii_ioctl(struct net_device *net_dev, struct ifreq *rq, int cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) struct sis900_private *sis_priv = netdev_priv(net_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) struct mii_ioctl_data *data = if_mii(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225)
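	/* These are the standard SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG ioctls
	 * used by user-space tools such as mii-tool. */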
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) switch(cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) case SIOCGMIIPHY: /* Get address of MII PHY in use. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) data->phy_id = sis_priv->mii->phy_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) case SIOCGMIIREG: /* Read MII PHY register. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) data->val_out = mdio_read(net_dev, data->phy_id & 0x1f, data->reg_num & 0x1f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) case SIOCSMIIREG: /* Write MII PHY register. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) mdio_write(net_dev, data->phy_id & 0x1f, data->reg_num & 0x1f, data->val_in);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) * sis900_set_config - Set media type by net_device.set_config
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) * @dev: the net device for media type change
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) * @map: ifmap passed by ifconfig
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) * Set the media type to 10BaseT, 100BaseT or 0 (auto) via ifconfig.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) * Only port changes are supported; all other runtime configuration
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) * changes are ignored.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) static int sis900_set_config(struct net_device *dev, struct ifmap *map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) struct sis900_private *sis_priv = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) struct mii_phy *mii_phy = sis_priv->mii;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) u16 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) if ((map->port != (u_char)(-1)) && (map->port != dev->if_port)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) /* we switch on the ifmap->port field. I couldn't find anything
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) * like a definition or standard for the values of that field.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) * I think the meaning of those values is device specific. But
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) * since I would like to change the media type via the ifconfig
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) * command I use the definition from linux/netdevice.h
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) * (which seems to be different from the ifport(pcmcia) definition) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) switch(map->port){
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) case IF_PORT_UNKNOWN: /* use auto here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) dev->if_port = map->port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) /* we are going to change the media type, so the Link
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) * will be temporarily down and we need to reflect that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) * here. When the Link comes up again, it will be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) * sensed by the sis_timer procedure, which also does
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) * all the rest for us */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) netif_carrier_off(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) /* read current state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) status = mdio_read(dev, mii_phy->phy_addr, MII_CONTROL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) /* enable auto-negotiation and restart the negotiation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) * (it is not entirely clear what restarting the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) * negotiation does here, but it seems like the right
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) * thing to do) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) mdio_write(dev, mii_phy->phy_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) MII_CONTROL, status | MII_CNTL_AUTO | MII_CNTL_RST_AUTO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) case IF_PORT_10BASET: /* 10BaseT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) dev->if_port = map->port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) /* we are going to change the media type, so the Link
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) * will be temporarily down and we need to reflect that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) * here. When the Link comes up again, it will be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) * sensed by the sis_timer procedure, which also does
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) * all the rest for us */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) netif_carrier_off(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) /* set Speed to 10Mbps */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) /* read current state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) status = mdio_read(dev, mii_phy->phy_addr, MII_CONTROL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) /* disable auto negotiation and force 10 Mbit mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) mdio_write(dev, mii_phy->phy_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) MII_CONTROL, status & ~(MII_CNTL_SPEED |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) MII_CNTL_AUTO));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) case IF_PORT_100BASET: /* 100BaseT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) case IF_PORT_100BASETX: /* 100BaseTx */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) dev->if_port = map->port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) /* we are going to change the media type, so the Link
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) * will be temporarily down and we need to reflect that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) * here. When the Link comes up again, it will be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) * sensed by the sis_timer procedure, which also does
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) * all the rest for us */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) netif_carrier_off(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) /* set Speed to 100Mbps */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) /* disable auto negotiation and force 100 Mbit mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) status = mdio_read(dev, mii_phy->phy_addr, MII_CONTROL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) mdio_write(dev, mii_phy->phy_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) MII_CONTROL, (status & ~MII_CNTL_AUTO) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) MII_CNTL_SPEED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) case IF_PORT_10BASE2: /* 10Base2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) case IF_PORT_AUI: /* AUI */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) case IF_PORT_100BASEFX: /* 100BaseFx */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) /* These modes are not supported by this driver */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) }
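/* A minimal userspace sketch (illustration only, not part of the driver)
 * of how a media-type change reaches sis900_set_config(): the SIOCSIFMAP
 * ioctl carries a struct ifmap whose ->port field takes the IF_PORT_*
 * values handled in the switch above.  The interface name passed in is an
 * assumption of the example.
 */
#if 0
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/netdevice.h>	/* struct ifreq, struct ifmap, IF_PORT_* */

static int force_10baset(const char *ifname)
{
	struct ifreq ifr;
	int ret, fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return -1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);

	/* fetch the current map so that only the port field is changed */
	ret = ioctl(fd, SIOCGIFMAP, &ifr);
	if (ret == 0) {
		ifr.ifr_map.port = IF_PORT_10BASET;	/* force 10BaseT */
		ret = ioctl(fd, SIOCSIFMAP, &ifr);
	}
	close(fd);
	return ret;
}
#endif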
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) * sis900_mcast_bitnr - compute hashtable index
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) * @addr: multicast address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) * @revision: revision id of chip
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) * SiS 900 uses the most significant 7 bits to index a 128-bit multicast
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) * hash table, which makes this function a little bit different from other
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) * drivers. SiS 900 B0 & 635 M/B use the most significant 8 bits to index a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) * 256-bit multicast hash table.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) static inline u16 sis900_mcast_bitnr(u8 *addr, u8 revision)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) u32 crc = ether_crc(6, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) /* keep the 8 or 7 most significant bits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) if ((revision >= SIS635A_900_REV) || (revision == SIS900B_900_REV))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) return (int)(crc >> 24);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) return (int)(crc >> 25);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) }
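/* Illustration only: how the bit number computed above is folded into the
 * 16-bit receive-filter words by set_rx_mode() below.  The multicast
 * address is an arbitrary example, not a known test vector.
 */
#if 0
static void sis900_mcast_bitnr_example(void)
{
	u8 addr[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	u16 mc_filter[16] = { 0 };	/* 256-bit table on 900B0 / 635 M/B */
	u16 bit_nr = sis900_mcast_bitnr(addr, SIS900B_900_REV);

	/* the upper bits select the 16-bit word, the low 4 bits the bit */
	mc_filter[bit_nr >> 4] |= 1 << (bit_nr & 0xf);
}
#endif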
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) * set_rx_mode - Set SiS900 receive mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) * @net_dev: the net device to be set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) * Set the SiS900 receive mode for promiscuous, multicast, or broadcast
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) * operation, and program the appropriate multicast filter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) * Multicast hash table changes from 128 to 256 bits for 635M/B & 900B0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) static void set_rx_mode(struct net_device *net_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) struct sis900_private *sis_priv = netdev_priv(net_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) void __iomem *ioaddr = sis_priv->ioaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) u16 mc_filter[16] = {0}; /* 256/128-bit multicast hash table */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) int i, table_entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) u32 rx_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) /* 635 M/B & 900B0 use a 256-bit hash table (16 words); others use 128 bits (8 words) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) if ((sis_priv->chipset_rev >= SIS635A_900_REV) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) (sis_priv->chipset_rev == SIS900B_900_REV))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) table_entries = 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) table_entries = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) if (net_dev->flags & IFF_PROMISC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) /* Accept any kind of packet */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) rx_mode = RFPromiscuous;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) for (i = 0; i < table_entries; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) mc_filter[i] = 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) } else if ((netdev_mc_count(net_dev) > multicast_filter_limit) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) (net_dev->flags & IFF_ALLMULTI)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) /* too many multicast addresses, or accept all multicast packets */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) rx_mode = RFAAB | RFAAM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) for (i = 0; i < table_entries; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) mc_filter[i] = 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) /* Accept broadcast packets and packets whose destination
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) * matches our MAC address; use the Receive Filter to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) * reject unwanted multicast packets */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) struct netdev_hw_addr *ha;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) rx_mode = RFAAB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) netdev_for_each_mc_addr(ha, net_dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) unsigned int bit_nr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) bit_nr = sis900_mcast_bitnr(ha->addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) sis_priv->chipset_rev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) mc_filter[bit_nr >> 4] |= (1 << (bit_nr & 0xf));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) /* update Multicast Hash Table in Receive Filter */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) for (i = 0; i < table_entries; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) /* the hash table occupies receive filter addresses 4 and up; addresses 0-2 hold the node address, hence the +0x04 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) sw32(rfcr, (u32)(0x00000004 + i) << RFADDR_shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) sw32(rfdr, mc_filter[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) sw32(rfcr, RFEN | rx_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) /* the sis900 is capable of looping back packets at the MAC level for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) * debugging purposes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) if (net_dev->flags & IFF_LOOPBACK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) u32 cr_saved;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) /* We must disable Tx/Rx before setting loopback mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) cr_saved = sr32(cr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) sw32(cr, cr_saved | TxDIS | RxDIS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) /* enable loopback */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) sw32(txcfg, sr32(txcfg) | TxMLB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) sw32(rxcfg, sr32(rxcfg) | RxATX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) /* restore cr */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) sw32(cr, cr_saved);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) * sis900_reset - Reset sis900 MAC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) * @net_dev: the net device to reset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) * Reset the sis900 MAC through the command register and wait until the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) * reset has completed. The backoff algorithm is changed for 900B0 & 635 M/B.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) static void sis900_reset(struct net_device *net_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) struct sis900_private *sis_priv = netdev_priv(net_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) void __iomem *ioaddr = sis_priv->ioaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) u32 status = TxRCMP | RxRCMP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) sw32(ier, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) sw32(imr, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) sw32(rfcr, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) sw32(cr, RxRESET | TxRESET | RESET | sr32(cr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) /* Check that the chip has finished the reset: clear each completion
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) * bit from 'status' as it shows up in the ISR; give up after 1000 polls. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) for (i = 0; status && (i < 1000); i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) status ^= sr32(isr) & status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) if (sis_priv->chipset_rev >= SIS635A_900_REV ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) sis_priv->chipset_rev == SIS900B_900_REV)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) sw32(cfg, PESEL | RND_CNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) sw32(cfg, PESEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) * sis900_remove - Remove sis900 device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) * @pci_dev: the pci device to be removed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) * remove and release SiS900 net device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) static void sis900_remove(struct pci_dev *pci_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) struct net_device *net_dev = pci_get_drvdata(pci_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) struct sis900_private *sis_priv = netdev_priv(net_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) unregister_netdev(net_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) while (sis_priv->first_mii) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) struct mii_phy *phy = sis_priv->first_mii;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) sis_priv->first_mii = phy->next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) kfree(phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) dma_free_coherent(&pci_dev->dev, RX_TOTAL_SIZE, sis_priv->rx_ring,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) sis_priv->rx_ring_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) dma_free_coherent(&pci_dev->dev, TX_TOTAL_SIZE, sis_priv->tx_ring,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) sis_priv->tx_ring_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) pci_iounmap(pci_dev, sis_priv->ioaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) free_netdev(net_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) static int __maybe_unused sis900_suspend(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) struct net_device *net_dev = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) struct sis900_private *sis_priv = netdev_priv(net_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) void __iomem *ioaddr = sis_priv->ioaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) if (!netif_running(net_dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) netif_stop_queue(net_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) netif_device_detach(net_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) /* Stop the chip's Tx and Rx Status Machine */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) sw32(cr, RxDIS | TxDIS | sr32(cr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) static int __maybe_unused sis900_resume(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) struct net_device *net_dev = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) struct sis900_private *sis_priv = netdev_priv(net_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) void __iomem *ioaddr = sis_priv->ioaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) if (!netif_running(net_dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) sis900_init_rxfilter(net_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) sis900_init_tx_ring(net_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) sis900_init_rx_ring(net_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) set_rx_mode(net_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) netif_device_attach(net_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) netif_start_queue(net_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) /* Workaround for EDB */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) sis900_set_mode(sis_priv, HW_SPEED_10_MBPS, FDX_CAPABLE_HALF_SELECTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) /* Enable all known interrupts by setting the interrupt mask. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxDESC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) sw32(cr, RxENA | sr32(cr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) sw32(ier, IE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) sis900_check_mode(net_dev, sis_priv->mii);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) static SIMPLE_DEV_PM_OPS(sis900_pm_ops, sis900_suspend, sis900_resume);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) static struct pci_driver sis900_pci_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) .name = SIS900_MODULE_NAME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) .id_table = sis900_pci_tbl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) .probe = sis900_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) .remove = sis900_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) .driver.pm = &sis900_pm_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) static int __init sis900_init_module(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) /* when built as a module, this is printed whether or not devices are found in probe */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) #ifdef MODULE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) printk(version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) return pci_register_driver(&sis900_pci_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) static void __exit sis900_cleanup_module(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) pci_unregister_driver(&sis900_pci_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) module_init(sis900_init_module);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) module_exit(sis900_cleanup_module);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579)