// SPDX-License-Identifier: GPL-2.0-only
/* Silan SC92031 PCI Fast Ethernet Adapter driver
 *
 * Based on vendor drivers:
 * Silan Fast Ethernet Netcard Driver:
 * MODULE_AUTHOR ("gaoyonghong");
 * MODULE_DESCRIPTION ("SILAN Fast Ethernet driver");
 * MODULE_LICENSE("GPL");
 * 8139D Fast Ethernet driver:
 * (C) 2002 by gaoyonghong
 * MODULE_AUTHOR ("gaoyonghong");
 * MODULE_DESCRIPTION ("Rsltek 8139D PCI Fast Ethernet Adapter driver");
 * MODULE_LICENSE("GPL");
 * Both are almost identical and seem to be based on pci-skeleton.c
 *
 * Rewritten for 2.6 by Cesar Eduardo Barros
 *
 * A datasheet for this chip can be found at
 * http://www.silan.com.cn/english/product/pdf/SC92031AY.pdf
 */

/* Note about set_mac_address: I don't know how to change the hardware
 * matching, so you need to enable IFF_PROMISC when using it.
 */

#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/crc32.h>

#include <asm/irq.h>

#define SC92031_NAME "sc92031"

/* BAR 0 is MMIO, BAR 1 is PIO */
#define SC92031_USE_PIO 0

/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). */
static int multicast_filter_limit = 64;
module_param(multicast_filter_limit, int, 0);
MODULE_PARM_DESC(multicast_filter_limit,
        "Maximum number of filtered multicast addresses");

static int media;
module_param(media, int, 0);
MODULE_PARM_DESC(media, "Media type (0x00 = autodetect,"
        " 0x01 = 10M half, 0x02 = 10M full,"
        " 0x04 = 100M half, 0x08 = 100M full)");

/* Size of the in-memory receive ring. */
#define RX_BUF_LEN_IDX 3 /* 0==8K, 1==16K, 2==32K, 3==64K, 4==128K */
#define RX_BUF_LEN (8192 << RX_BUF_LEN_IDX)
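/* With RX_BUF_LEN_IDX == 3 this works out to 8192 << 3 == 64 KiB, which
 * matches the Cfg1_Rcv64K value programmed into Config1 in _sc92031_reset().
 */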

/* Number of Tx descriptor registers. */
#define NUM_TX_DESC 4

/* max supported ethernet frame size -- must be at least (dev->mtu+14+4). */
#define MAX_ETH_FRAME_SIZE 1536

/* Size of the Tx bounce buffers -- must be at least (dev->mtu+14+4). */
#define TX_BUF_SIZE MAX_ETH_FRAME_SIZE
#define TX_BUF_TOT_LEN (TX_BUF_SIZE * NUM_TX_DESC)
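/* One fixed-size bounce slot per descriptor: 4 * 1536 bytes == 6 KiB total. */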

/* The following settings are log_2(bytes)-4: 0 == 16 bytes ... 6 == 1024 bytes, 7 == end of packet. */
#define RX_FIFO_THRESH 7 /* Rx buffer level before first PCI xfer. */
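/* A threshold of 7 therefore means store-and-forward: the chip waits for a
 * complete packet in its FIFO before starting the PCI transfer.
 */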

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT (4*HZ)

#define SILAN_STATS_NUM 2 /* number of ETHTOOL_GSTATS */

/* media options */
#define AUTOSELECT 0x00
#define M10_HALF   0x01
#define M10_FULL   0x02
#define M100_HALF  0x04
#define M100_FULL  0x08

/* Symbolic offsets to registers. */
enum silan_registers {
        Config0 = 0x00,         // Config0
        Config1 = 0x04,         // Config1
        RxBufWPtr = 0x08,       // Rx buffer write pointer
        IntrStatus = 0x0C,      // Interrupt status
        IntrMask = 0x10,        // Interrupt mask
        RxbufAddr = 0x14,       // Rx buffer start address
        RxBufRPtr = 0x18,       // Rx buffer read pointer
        Txstatusall = 0x1C,     // Transmit status of all descriptors
        TxStatus0 = 0x20,       // Transmit status (four 32bit registers).
        TxAddr0 = 0x30,         // Tx descriptors (also four 32bit).
        RxConfig = 0x40,        // Rx configuration
        MAC0 = 0x44,            // Ethernet hardware address.
        MAR0 = 0x4C,            // Multicast filter.
        RxStatus0 = 0x54,       // Rx status
        TxConfig = 0x5C,        // Tx configuration
        PhyCtrl = 0x60,         // PHY control
        FlowCtrlConfig = 0x64,  // flow control
        Miicmd0 = 0x68,         // Mii command0 register
        Miicmd1 = 0x6C,         // Mii command1 register
        Miistatus = 0x70,       // Mii status register
        Timercnt = 0x74,        // Timer counter register
        TimerIntr = 0x78,       // Timer interrupt register
        PMConfig = 0x7C,        // Power Manager configuration
        CRC0 = 0x80,            // Power Manager CRC (two 32bit registers)
        Wakeup0 = 0x88,         // Power Manager wakeup (eight 64bit registers)
        LSBCRC0 = 0xC8,         // Power Manager LSBCRC (two 32bit registers)
        TestD0 = 0xD0,
        TestD4 = 0xD4,
        TestD8 = 0xD8,
};

#define MII_JAB             16
#define MII_OutputStatus    24

#define PHY_16_JAB_ENB      0x1000
#define PHY_16_PORT_ENB     0x1

enum IntrStatusBits {
        LinkFail = 0x80000000,
        LinkOK = 0x40000000,
        TimeOut = 0x20000000,
        RxOverflow = 0x0040,
        RxOK = 0x0020,
        TxOK = 0x0001,
        IntrBits = LinkFail|LinkOK|TimeOut|RxOverflow|RxOK|TxOK,
};

enum TxStatusBits {
        TxCarrierLost = 0x20000000,
        TxAborted = 0x10000000,
        TxOutOfWindow = 0x08000000,
        TxNccShift = 22,
        EarlyTxThresShift = 16,
        TxStatOK = 0x8000,
        TxUnderrun = 0x4000,
        TxOwn = 0x2000,
};

enum RxStatusBits {
        RxStatesOK = 0x80000,
        RxBadAlign = 0x40000,
        RxHugeFrame = 0x20000,
        RxSmallFrame = 0x10000,
        RxCRCOK = 0x8000,
        RxCrlFrame = 0x4000,
        Rx_Broadcast = 0x2000,
        Rx_Multicast = 0x1000,
        RxAddrMatch = 0x0800,
        MiiErr = 0x0400,
};

enum RxConfigBits {
        RxFullDx = 0x80000000,
        RxEnb = 0x40000000,
        RxSmall = 0x20000000,
        RxHuge = 0x10000000,
        RxErr = 0x08000000,
        RxAllphys = 0x04000000,
        RxMulticast = 0x02000000,
        RxBroadcast = 0x01000000,
        RxLoopBack = (1 << 23) | (1 << 22),
        LowThresholdShift = 12,
        HighThresholdShift = 2,
};

enum TxConfigBits {
        TxFullDx = 0x80000000,
        TxEnb = 0x40000000,
        TxEnbPad = 0x20000000,
        TxEnbHuge = 0x10000000,
        TxEnbFCS = 0x08000000,
        TxNoBackOff = 0x04000000,
        TxEnbPrem = 0x02000000,
        TxCareLostCrs = 0x1000000,
        TxExdCollNum = 0xf00000,
        TxDataRate = 0x80000,
};

enum PhyCtrlconfigbits {
        PhyCtrlAne = 0x80000000,
        PhyCtrlSpd100 = 0x40000000,
        PhyCtrlSpd10 = 0x20000000,
        PhyCtrlPhyBaseAddr = 0x1f000000,
        PhyCtrlDux = 0x800000,
        PhyCtrlReset = 0x400000,
};

enum FlowCtrlConfigBits {
        FlowCtrlFullDX = 0x80000000,
        FlowCtrlEnb = 0x40000000,
};

enum Config0Bits {
        Cfg0_Reset = 0x80000000,
        Cfg0_Anaoff = 0x40000000,
        Cfg0_LDPS = 0x20000000,
};

enum Config1Bits {
        Cfg1_EarlyRx = 1 << 31,
        Cfg1_EarlyTx = 1 << 30,

        // rx buffer size
        Cfg1_Rcv8K = 0x0,
        Cfg1_Rcv16K = 0x1,
        Cfg1_Rcv32K = 0x3,
        Cfg1_Rcv64K = 0x7,
        Cfg1_Rcv128K = 0xf,
};

enum MiiCmd0Bits {
        Mii_Divider = 0x20000000,
        Mii_WRITE = 0x400000,
        Mii_READ = 0x200000,
        Mii_SCAN = 0x100000,
        Mii_Tamod = 0x80000,
        Mii_Drvmod = 0x40000,
        Mii_mdc = 0x20000,
        Mii_mdoen = 0x10000,
        Mii_mdo = 0x8000,
        Mii_mdi = 0x4000,
};

enum MiiStatusBits {
        Mii_StatusBusy = 0x80000000,
};

enum PMConfigBits {
        PM_Enable = 1 << 31,
        PM_LongWF = 1 << 30,
        PM_Magic = 1 << 29,
        PM_LANWake = 1 << 28,
        PM_LWPTN = (1 << 27 | 1 << 26),
        PM_LinkUp = 1 << 25,
        PM_WakeUp = 1 << 24,
};

/* Locking rules:
 * priv->lock protects most of the fields of priv and most of the
 * hardware registers. It does not have to protect against softirqs
 * between sc92031_disable_interrupts and sc92031_enable_interrupts;
 * it also does not need to be used in ->open and ->stop while the
 * device interrupts are off.
 * Not having to protect against softirqs is very useful due to heavy
 * use of mdelay() at _sc92031_reset.
 * Functions prefixed with _sc92031_ must be called with the lock held;
 * functions prefixed with sc92031_ must be called without the lock held.
 */

/* Locking rules for the interrupt:
 * - the interrupt and the tasklet never run at the same time
 * - neither runs between sc92031_disable_interrupts and
 *   sc92031_enable_interrupts
 */

struct sc92031_priv {
        spinlock_t lock;
        /* iomap.h cookie */
        void __iomem *port_base;
        /* pci device structure */
        struct pci_dev *pdev;
        /* tasklet */
        struct tasklet_struct tasklet;

        /* CPU address of rx ring */
        void *rx_ring;
        /* PCI address of rx ring */
        dma_addr_t rx_ring_dma_addr;
        /* PCI address of rx ring read pointer */
        dma_addr_t rx_ring_tail;

        /* tx ring write index */
        unsigned tx_head;
        /* tx ring read index */
        unsigned tx_tail;
        /* CPU address of tx bounce buffer */
        void *tx_bufs;
        /* PCI address of tx bounce buffer */
        dma_addr_t tx_bufs_dma_addr;

        /* copies of some hardware registers */
        u32 intr_status;
        atomic_t intr_mask;
        u32 rx_config;
        u32 tx_config;
        u32 pm_config;

        /* copy of some flags from dev->flags */
        unsigned int mc_flags;

        /* for ETHTOOL_GSTATS */
        u64 tx_timeouts;
        u64 rx_loss;

        /* for dev->get_stats */
        long rx_value;
        struct net_device *ndev;
};

/* I don't know which registers can be safely read; however, I can guess
 * MAC0 is one of them. */
static inline void _sc92031_dummy_read(void __iomem *port_base)
{
        ioread32(port_base + MAC0);
}
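
/* Reading a register back also flushes posted MMIO writes to the device;
 * that is how sc92031_disable_interrupts() uses this helper right after
 * clearing IntrMask.
 */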

static u32 _sc92031_mii_wait(void __iomem *port_base)
{
        u32 mii_status;

        do {
                udelay(10);
                mii_status = ioread32(port_base + Miistatus);
        } while (mii_status & Mii_StatusBusy);

        return mii_status;
}
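
/* Note that the poll above has no timeout: it relies on the hardware always
 * clearing Mii_StatusBusy eventually.
 */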

static u32 _sc92031_mii_cmd(void __iomem *port_base, u32 cmd0, u32 cmd1)
{
        iowrite32(Mii_Divider, port_base + Miicmd0);

        _sc92031_mii_wait(port_base);

        iowrite32(cmd1, port_base + Miicmd1);
        iowrite32(Mii_Divider | cmd0, port_base + Miicmd0);

        return _sc92031_mii_wait(port_base);
}

static void _sc92031_mii_scan(void __iomem *port_base)
{
        _sc92031_mii_cmd(port_base, Mii_SCAN, 0x1 << 6);
}

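/* Field packing implied by these helpers: the PHY register index sits in
 * bits 10:6 of Miicmd1, write data in bits 26:11, and read data comes back
 * in bits 28:13 of the Miistatus value (hence the >> 13 below). This is
 * inferred from the shifts used here, not taken from the datasheet.
 */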
static u16 _sc92031_mii_read(void __iomem *port_base, unsigned reg)
{
        return _sc92031_mii_cmd(port_base, Mii_READ, reg << 6) >> 13;
}

static void _sc92031_mii_write(void __iomem *port_base, unsigned reg, u16 val)
{
        _sc92031_mii_cmd(port_base, Mii_WRITE, (reg << 6) | ((u32)val << 11));
}

static void sc92031_disable_interrupts(struct net_device *dev)
{
        struct sc92031_priv *priv = netdev_priv(dev);
        void __iomem *port_base = priv->port_base;

        /* tell the tasklet/interrupt not to enable interrupts */
        atomic_set(&priv->intr_mask, 0);
        wmb();

        /* stop interrupts */
        iowrite32(0, port_base + IntrMask);
        _sc92031_dummy_read(port_base);

        /* wait for any concurrent interrupt/tasklet to finish */
        synchronize_irq(priv->pdev->irq);
        tasklet_disable(&priv->tasklet);
}
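
/* The wmb() above pairs with the rmb() at the end of sc92031_tasklet(): a
 * tasklet that is already running re-reads intr_mask after its rmb(), so it
 * writes 0 to IntrMask instead of re-enabling interrupts behind our back.
 */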

static void sc92031_enable_interrupts(struct net_device *dev)
{
        struct sc92031_priv *priv = netdev_priv(dev);
        void __iomem *port_base = priv->port_base;

        tasklet_enable(&priv->tasklet);

        atomic_set(&priv->intr_mask, IntrBits);
        wmb();

        iowrite32(IntrBits, port_base + IntrMask);
}

static void _sc92031_disable_tx_rx(struct net_device *dev)
{
        struct sc92031_priv *priv = netdev_priv(dev);
        void __iomem *port_base = priv->port_base;

        priv->rx_config &= ~RxEnb;
        priv->tx_config &= ~TxEnb;
        iowrite32(priv->rx_config, port_base + RxConfig);
        iowrite32(priv->tx_config, port_base + TxConfig);
}

static void _sc92031_enable_tx_rx(struct net_device *dev)
{
        struct sc92031_priv *priv = netdev_priv(dev);
        void __iomem *port_base = priv->port_base;

        priv->rx_config |= RxEnb;
        priv->tx_config |= TxEnb;
        iowrite32(priv->rx_config, port_base + RxConfig);
        iowrite32(priv->tx_config, port_base + TxConfig);
}

static void _sc92031_tx_clear(struct net_device *dev)
{
        struct sc92031_priv *priv = netdev_priv(dev);

        while (priv->tx_head - priv->tx_tail > 0) {
                priv->tx_tail++;
                dev->stats.tx_dropped++;
        }
        priv->tx_head = priv->tx_tail = 0;
}

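/* Multicast filtering: the high byte of the one's-complement Ethernet CRC of
 * each address is shuffled into a 6-bit index by the bit tests below (bits 2
 * and 3 of that byte are unused), selecting one bit of the 64-bit MAR.
 * Worked example: a high byte of 0x01 yields index 2, setting bit 2 of the
 * word at MAR0 + 4. This mirrors the vendor driver; it has not been checked
 * against the datasheet.
 */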
static void _sc92031_set_mar(struct net_device *dev)
{
        struct sc92031_priv *priv = netdev_priv(dev);
        void __iomem *port_base = priv->port_base;
        u32 mar0 = 0, mar1 = 0;

        if ((dev->flags & IFF_PROMISC) ||
            netdev_mc_count(dev) > multicast_filter_limit ||
            (dev->flags & IFF_ALLMULTI))
                mar0 = mar1 = 0xffffffff;
        else if (dev->flags & IFF_MULTICAST) {
                struct netdev_hw_addr *ha;

                netdev_for_each_mc_addr(ha, dev) {
                        u32 crc;
                        unsigned bit = 0;

                        crc = ~ether_crc(ETH_ALEN, ha->addr);
                        crc >>= 24;

                        if (crc & 0x01) bit |= 0x02;
                        if (crc & 0x02) bit |= 0x01;
                        if (crc & 0x10) bit |= 0x20;
                        if (crc & 0x20) bit |= 0x10;
                        if (crc & 0x40) bit |= 0x08;
                        if (crc & 0x80) bit |= 0x04;

                        if (bit > 31)
                                mar0 |= 0x1 << (bit - 32);
                        else
                                mar1 |= 0x1 << bit;
                }
        }

        iowrite32(mar0, port_base + MAR0);
        iowrite32(mar1, port_base + MAR0 + 4);
}

static void _sc92031_set_rx_config(struct net_device *dev)
{
        struct sc92031_priv *priv = netdev_priv(dev);
        void __iomem *port_base = priv->port_base;
        unsigned int old_mc_flags;
        u32 rx_config_bits = 0;

        old_mc_flags = priv->mc_flags;

        if (dev->flags & IFF_PROMISC)
                rx_config_bits |= RxSmall | RxHuge | RxErr | RxBroadcast
                                | RxMulticast | RxAllphys;

        if (dev->flags & (IFF_ALLMULTI | IFF_MULTICAST))
                rx_config_bits |= RxMulticast;

        if (dev->flags & IFF_BROADCAST)
                rx_config_bits |= RxBroadcast;

        priv->rx_config &= ~(RxSmall | RxHuge | RxErr | RxBroadcast
                        | RxMulticast | RxAllphys);
        priv->rx_config |= rx_config_bits;

        priv->mc_flags = dev->flags & (IFF_PROMISC | IFF_ALLMULTI
                        | IFF_MULTICAST | IFF_BROADCAST);

        if (netif_carrier_ok(dev) && priv->mc_flags != old_mc_flags)
                iowrite32(priv->rx_config, port_base + RxConfig);
}

static bool _sc92031_check_media(struct net_device *dev)
{
        struct sc92031_priv *priv = netdev_priv(dev);
        void __iomem *port_base = priv->port_base;
        u16 bmsr;

        bmsr = _sc92031_mii_read(port_base, MII_BMSR);
        rmb();
        if (bmsr & BMSR_LSTATUS) {
                bool speed_100, duplex_full;
                u32 flow_ctrl_config = 0;
                u16 output_status = _sc92031_mii_read(port_base,
                                MII_OutputStatus);
                _sc92031_mii_scan(port_base);

                speed_100 = output_status & 0x2;
                duplex_full = output_status & 0x4;

                /* Initial Tx/Rx configuration */
                priv->rx_config = (0x40 << LowThresholdShift) | (0x1c0 << HighThresholdShift);
                priv->tx_config = 0x48800000;
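                /* By my reading of the TxConfigBits enum above, 0x48800000
                 * decomposes as TxEnb | TxEnbFCS | (8 << 20), the last being
                 * a value in the TxExdCollNum field; the 0x80000 bit added
                 * below for 10M links is TxDataRate, so the named constant
                 * is used there.
                 */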

                /* NOTE: vendor driver had dead code here to enable tx padding */

                if (!speed_100)
                        priv->tx_config |= TxDataRate;

                // configure rx mode
                _sc92031_set_rx_config(dev);

                if (duplex_full) {
                        priv->rx_config |= RxFullDx;
                        priv->tx_config |= TxFullDx;
                        flow_ctrl_config = FlowCtrlFullDX | FlowCtrlEnb;
                } else {
                        priv->rx_config &= ~RxFullDx;
                        priv->tx_config &= ~TxFullDx;
                }

                _sc92031_set_mar(dev);
                _sc92031_set_rx_config(dev);
                _sc92031_enable_tx_rx(dev);
                iowrite32(flow_ctrl_config, port_base + FlowCtrlConfig);

                netif_carrier_on(dev);

                if (printk_ratelimit())
                        printk(KERN_INFO "%s: link up, %sMbps, %s-duplex\n",
                                dev->name,
                                speed_100 ? "100" : "10",
                                duplex_full ? "full" : "half");
                return true;
        } else {
                _sc92031_mii_scan(port_base);

                netif_carrier_off(dev);

                _sc92031_disable_tx_rx(dev);

                if (printk_ratelimit())
                        printk(KERN_INFO "%s: link down\n", dev->name);
                return false;
        }
}

static void _sc92031_phy_reset(struct net_device *dev)
{
        struct sc92031_priv *priv = netdev_priv(dev);
        void __iomem *port_base = priv->port_base;
        u32 phy_ctrl;

        phy_ctrl = ioread32(port_base + PhyCtrl);
        phy_ctrl &= ~(PhyCtrlDux | PhyCtrlSpd100 | PhyCtrlSpd10);
        phy_ctrl |= PhyCtrlAne | PhyCtrlReset;

        switch (media) {
        default:
        case AUTOSELECT:
                phy_ctrl |= PhyCtrlDux | PhyCtrlSpd100 | PhyCtrlSpd10;
                break;
        case M10_HALF:
                phy_ctrl |= PhyCtrlSpd10;
                break;
        case M10_FULL:
                phy_ctrl |= PhyCtrlDux | PhyCtrlSpd10;
                break;
        case M100_HALF:
                phy_ctrl |= PhyCtrlSpd100;
                break;
        case M100_FULL:
                phy_ctrl |= PhyCtrlDux | PhyCtrlSpd100;
                break;
        }

        iowrite32(phy_ctrl, port_base + PhyCtrl);
        mdelay(10);

        phy_ctrl &= ~PhyCtrlReset;
        iowrite32(phy_ctrl, port_base + PhyCtrl);
        mdelay(1);

        _sc92031_mii_write(port_base, MII_JAB,
                        PHY_16_JAB_ENB | PHY_16_PORT_ENB);
        _sc92031_mii_scan(port_base);

        netif_carrier_off(dev);
        netif_stop_queue(dev);
}
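
/* After a PHY reset the carrier is forced off; the LinkOK/LinkFail interrupt
 * bits then funnel through _sc92031_link_tasklet(), which re-runs
 * _sc92031_check_media() to pick the link state back up.
 */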

static void _sc92031_reset(struct net_device *dev)
{
        struct sc92031_priv *priv = netdev_priv(dev);
        void __iomem *port_base = priv->port_base;

        /* disable PM */
        iowrite32(0, port_base + PMConfig);

        /* soft reset the chip */
        iowrite32(Cfg0_Reset, port_base + Config0);
        mdelay(200);

        iowrite32(0, port_base + Config0);
        mdelay(10);

        /* disable interrupts */
        iowrite32(0, port_base + IntrMask);

        /* clear multicast address */
        iowrite32(0, port_base + MAR0);
        iowrite32(0, port_base + MAR0 + 4);

        /* init rx ring */
        iowrite32(priv->rx_ring_dma_addr, port_base + RxbufAddr);
        priv->rx_ring_tail = priv->rx_ring_dma_addr;

        /* init tx ring */
        _sc92031_tx_clear(dev);

        /* clear old register values */
        priv->intr_status = 0;
        atomic_set(&priv->intr_mask, 0);
        priv->rx_config = 0;
        priv->tx_config = 0;
        priv->mc_flags = 0;

        /* configure rx buffer size */
        /* NOTE: vendor driver had dead code here to enable early tx/rx */
        iowrite32(Cfg1_Rcv64K, port_base + Config1);

        _sc92031_phy_reset(dev);
        _sc92031_check_media(dev);

        /* calculate rx fifo overflow */
        priv->rx_value = 0;

        /* enable PM */
        iowrite32(priv->pm_config, port_base + PMConfig);

        /* clear intr register */
        ioread32(port_base + IntrStatus);
}

static void _sc92031_tx_tasklet(struct net_device *dev)
{
        struct sc92031_priv *priv = netdev_priv(dev);
        void __iomem *port_base = priv->port_base;

        unsigned old_tx_tail;
        unsigned entry;
        u32 tx_status;

        old_tx_tail = priv->tx_tail;
        while (priv->tx_head - priv->tx_tail > 0) {
                entry = priv->tx_tail % NUM_TX_DESC;
                tx_status = ioread32(port_base + TxStatus0 + entry * 4);

                if (!(tx_status & (TxStatOK | TxUnderrun | TxAborted)))
                        break;

                priv->tx_tail++;

                if (tx_status & TxStatOK) {
                        dev->stats.tx_bytes += tx_status & 0x1fff;
                        dev->stats.tx_packets++;
                        /* Note: TxCarrierLost is always asserted at 100 Mbps. */
                        dev->stats.collisions += (tx_status >> TxNccShift) & 0xf;
                }

                if (tx_status & (TxOutOfWindow | TxAborted)) {
                        dev->stats.tx_errors++;

                        if (tx_status & TxAborted)
                                dev->stats.tx_aborted_errors++;

                        if (tx_status & TxCarrierLost)
                                dev->stats.tx_carrier_errors++;

                        if (tx_status & TxOutOfWindow)
                                dev->stats.tx_window_errors++;
                }

                if (tx_status & TxUnderrun)
                        dev->stats.tx_fifo_errors++;
        }

        if (priv->tx_tail != old_tx_tail)
                if (netif_queue_stopped(dev))
                        netif_wake_queue(dev);
}

static void _sc92031_rx_tasklet_error(struct net_device *dev,
                                      u32 rx_status, unsigned rx_size)
{
        if (rx_size > (MAX_ETH_FRAME_SIZE + 4) || rx_size < 16) {
                dev->stats.rx_errors++;
                dev->stats.rx_length_errors++;
        }

        if (!(rx_status & RxStatesOK)) {
                dev->stats.rx_errors++;

                if (rx_status & (RxHugeFrame | RxSmallFrame))
                        dev->stats.rx_length_errors++;

                if (rx_status & RxBadAlign)
                        dev->stats.rx_frame_errors++;

                if (!(rx_status & RxCRCOK))
                        dev->stats.rx_crc_errors++;
        } else {
                struct sc92031_priv *priv = netdev_priv(dev);
                priv->rx_loss++;
        }
}

static void _sc92031_rx_tasklet(struct net_device *dev)
{
        struct sc92031_priv *priv = netdev_priv(dev);
        void __iomem *port_base = priv->port_base;

        dma_addr_t rx_ring_head;
        unsigned rx_len;
        unsigned rx_ring_offset;
        void *rx_ring = priv->rx_ring;

        rx_ring_head = ioread32(port_base + RxBufWPtr);
        rmb();

        /* The RxBufWPtr register holds only the low 17 bits of the write
         * pointer (enough to address the largest 128K ring); rebuild the
         * full DMA address from the ring base before comparing pointers.
         */
        rx_ring_head &= (dma_addr_t)(RX_BUF_LEN - 1);
        rx_ring_head |= priv->rx_ring_dma_addr & ~(dma_addr_t)(RX_BUF_LEN - 1);
        if (rx_ring_head < priv->rx_ring_dma_addr)
                rx_ring_head += RX_BUF_LEN;

        if (rx_ring_head >= priv->rx_ring_tail)
                rx_len = rx_ring_head - priv->rx_ring_tail;
        else
                rx_len = RX_BUF_LEN - (priv->rx_ring_tail - rx_ring_head);

        if (!rx_len)
                return;

        if (unlikely(rx_len > RX_BUF_LEN)) {
                if (printk_ratelimit())
                        printk(KERN_ERR "%s: rx packet length > rx buffer\n",
                                dev->name);
                return;
        }

        rx_ring_offset = (priv->rx_ring_tail - priv->rx_ring_dma_addr) % RX_BUF_LEN;

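        /* Ring entry layout, as consumed below: each frame is preceded by a
         * 4-byte little-endian status word (status flags in the low 20 bits,
         * frame length including CRC in the upper 12 bits), and the frame
         * data is padded to a 4-byte boundary.
         */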
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) while (rx_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) u32 rx_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) unsigned rx_size, rx_size_align, pkt_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) rx_status = le32_to_cpup((__le32 *)(rx_ring + rx_ring_offset));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) rmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) rx_size = rx_status >> 20;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) rx_size_align = (rx_size + 3) & ~3; // for 4 bytes aligned
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) pkt_size = rx_size - 4; // Omit the four octet CRC from the length.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) rx_ring_offset = (rx_ring_offset + 4) % RX_BUF_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) if (unlikely(rx_status == 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) rx_size > (MAX_ETH_FRAME_SIZE + 4) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) rx_size < 16 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) !(rx_status & RxStatesOK))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) _sc92031_rx_tasklet_error(dev, rx_status, rx_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) if (unlikely(rx_size_align + 4 > rx_len)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) if (printk_ratelimit())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) printk(KERN_ERR "%s: rx_len is too small\n", dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) rx_len -= rx_size_align + 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) skb = netdev_alloc_skb_ip_align(dev, pkt_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) if (unlikely(!skb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) if (printk_ratelimit())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) printk(KERN_ERR "%s: Couldn't allocate a skb_buff for a packet of size %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) dev->name, pkt_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) goto next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) if ((rx_ring_offset + pkt_size) > RX_BUF_LEN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) skb_put_data(skb, rx_ring + rx_ring_offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) RX_BUF_LEN - rx_ring_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) skb_put_data(skb, rx_ring,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) pkt_size - (RX_BUF_LEN - rx_ring_offset));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) skb_put_data(skb, rx_ring + rx_ring_offset, pkt_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) skb->protocol = eth_type_trans(skb, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) netif_rx(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) dev->stats.rx_bytes += pkt_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) dev->stats.rx_packets++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) if (rx_status & Rx_Multicast)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) dev->stats.multicast++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) next:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) rx_ring_offset = (rx_ring_offset + rx_size_align) % RX_BUF_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) }
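^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) /* Make sure all reads from the Rx ring have completed before the write
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) * below returns the consumed space to the hardware.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) */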
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) mb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) priv->rx_ring_tail = rx_ring_head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) iowrite32(priv->rx_ring_tail, port_base + RxBufRPtr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) static void _sc92031_link_tasklet(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) if (_sc92031_check_media(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) netif_wake_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) netif_stop_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) dev->stats.tx_carrier_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) static void sc92031_tasklet(struct tasklet_struct *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) struct sc92031_priv *priv = from_tasklet(priv, t, tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) struct net_device *dev = priv->ndev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) void __iomem *port_base = priv->port_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) u32 intr_status, intr_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) intr_status = priv->intr_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) spin_lock(&priv->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) if (unlikely(!netif_running(dev)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) if (intr_status & TxOK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) _sc92031_tx_tasklet(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) if (intr_status & RxOK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) _sc92031_rx_tasklet(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) if (intr_status & RxOverflow)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) dev->stats.rx_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) if (intr_status & TimeOut) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) dev->stats.rx_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) dev->stats.rx_length_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) if (intr_status & (LinkFail | LinkOK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) _sc92031_link_tasklet(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) out:
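^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) /* The interrupt handler masked all interrupts before scheduling this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) * tasklet; write the saved mask back to re-enable them.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) */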
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) intr_mask = atomic_read(&priv->intr_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) rmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) iowrite32(intr_mask, port_base + IntrMask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) spin_unlock(&priv->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) static irqreturn_t sc92031_interrupt(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) struct net_device *dev = dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) struct sc92031_priv *priv = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) void __iomem *port_base = priv->port_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) u32 intr_status, intr_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) /* mask interrupts before clearing IntrStatus */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) iowrite32(0, port_base + IntrMask);
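^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) /* The read-back flushes the posted write, so the mask is in effect
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) * before IntrStatus is sampled below (the usual purpose of such a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) * dummy read; inferred, as _sc92031_dummy_read is defined elsewhere).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) */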
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) _sc92031_dummy_read(port_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) intr_status = ioread32(port_base + IntrStatus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) if (unlikely(intr_status == 0xffffffff))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) return IRQ_NONE; // hardware has gone missing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) intr_status &= IntrBits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) if (!intr_status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) goto out_none;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) priv->intr_status = intr_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) tasklet_schedule(&priv->tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) out_none:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) intr_mask = atomic_read(&priv->intr_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) rmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) iowrite32(intr_mask, port_base + IntrMask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) return IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) static struct net_device_stats *sc92031_get_stats(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) struct sc92031_priv *priv = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) void __iomem *port_base = priv->port_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) // FIXME: I do not understand what this is trying to do.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) if (netif_running(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) int temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) spin_lock_bh(&priv->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) /* Update the error count. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) temp = (ioread32(port_base + RxStatus0) >> 16) & 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919)
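^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) /* Best-guess reading of the vendor logic (see the FIXME above): the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) * on-chip counter appears to saturate at 0xffff, so fold saturated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) * readings into rx_value to keep the reported total monotonic.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) */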
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) if (temp == 0xffff) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) priv->rx_value += temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) dev->stats.rx_fifo_errors = priv->rx_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) dev->stats.rx_fifo_errors = temp + priv->rx_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) spin_unlock_bh(&priv->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) return &dev->stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) static netdev_tx_t sc92031_start_xmit(struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) struct sc92031_priv *priv = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) void __iomem *port_base = priv->port_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) unsigned int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) unsigned int entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) u32 tx_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) if (unlikely(skb->len > TX_BUF_SIZE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) dev->stats.tx_dropped++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) spin_lock(&priv->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) if (unlikely(!netif_carrier_ok(dev))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) dev->stats.tx_dropped++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) BUG_ON(priv->tx_head - priv->tx_tail >= NUM_TX_DESC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954)
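^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) /* tx_head and tx_tail are free-running counters: their difference is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) * the number of descriptors in flight, and the remainder selects the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) * Tx buffer slot.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) */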
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) entry = priv->tx_head++ % NUM_TX_DESC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) skb_copy_and_csum_dev(skb, priv->tx_bufs + entry * TX_BUF_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958)
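^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) /* Frames shorter than ETH_ZLEN must be padded to the minimum Ethernet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) * length; zero the tail so stale buffer contents never reach the wire.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) */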
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) len = skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) if (len < ETH_ZLEN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) memset(priv->tx_bufs + entry * TX_BUF_SIZE + len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) 0, ETH_ZLEN - len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) len = ETH_ZLEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965)
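^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) /* Ensure the frame data and padding have reached the coherent DMA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) * buffer before the register writes below trigger the transmit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) */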
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967)
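^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) /* The Tx status word carries the frame length in its low bits; the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) * 0x30000/0x50000 high bits look like an early-Tx/FIFO threshold that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) * the vendor driver scales with frame size (undocumented; kept as-is).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) */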
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) if (len < 100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) tx_status = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) else if (len < 300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) tx_status = 0x30000 | len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) tx_status = 0x50000 | len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) iowrite32(priv->tx_bufs_dma_addr + entry * TX_BUF_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) port_base + TxAddr0 + entry * 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) iowrite32(tx_status, port_base + TxStatus0 + entry * 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) if (priv->tx_head - priv->tx_tail >= NUM_TX_DESC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) netif_stop_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) spin_unlock(&priv->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) dev_consume_skb_any(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) return NETDEV_TX_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) static int sc92031_open(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) struct sc92031_priv *priv = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) struct pci_dev *pdev = priv->pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) priv->rx_ring = dma_alloc_coherent(&pdev->dev, RX_BUF_LEN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) &priv->rx_ring_dma_addr, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) if (unlikely(!priv->rx_ring)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) goto out_alloc_rx_ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) priv->tx_bufs = dma_alloc_coherent(&pdev->dev, TX_BUF_TOT_LEN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) &priv->tx_bufs_dma_addr, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) if (unlikely(!priv->tx_bufs)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) goto out_alloc_tx_bufs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) priv->tx_head = priv->tx_tail = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) err = request_irq(pdev->irq, sc92031_interrupt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) IRQF_SHARED, dev->name, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) if (unlikely(err < 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) goto out_request_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) priv->pm_config = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) /* Interrupts already disabled by sc92031_stop or sc92031_probe */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) spin_lock_bh(&priv->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) _sc92031_reset(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) spin_unlock_bh(&priv->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) sc92031_enable_interrupts(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) if (netif_carrier_ok(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) netif_start_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) netif_tx_disable(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) out_request_irq:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) dma_free_coherent(&pdev->dev, TX_BUF_TOT_LEN, priv->tx_bufs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) priv->tx_bufs_dma_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) out_alloc_tx_bufs:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) dma_free_coherent(&pdev->dev, RX_BUF_LEN, priv->rx_ring,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) priv->rx_ring_dma_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) out_alloc_rx_ring:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) static int sc92031_stop(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) struct sc92031_priv *priv = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) struct pci_dev *pdev = priv->pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) netif_tx_disable(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) /* Disable interrupts, stop Tx and Rx. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) sc92031_disable_interrupts(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) spin_lock_bh(&priv->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) _sc92031_disable_tx_rx(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) _sc92031_tx_clear(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) spin_unlock_bh(&priv->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) free_irq(pdev->irq, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) dma_free_coherent(&pdev->dev, TX_BUF_TOT_LEN, priv->tx_bufs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) priv->tx_bufs_dma_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) dma_free_coherent(&pdev->dev, RX_BUF_LEN, priv->rx_ring,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) priv->rx_ring_dma_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) static void sc92031_set_multicast_list(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) struct sc92031_priv *priv = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) spin_lock_bh(&priv->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) _sc92031_set_mar(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) _sc92031_set_rx_config(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) spin_unlock_bh(&priv->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) static void sc92031_tx_timeout(struct net_device *dev, unsigned int txqueue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) struct sc92031_priv *priv = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) /* Disable interrupts by clearing the interrupt mask. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) sc92031_disable_interrupts(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) spin_lock(&priv->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) priv->tx_timeouts++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) _sc92031_reset(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) spin_unlock(&priv->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) /* enable interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) sc92031_enable_interrupts(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) if (netif_carrier_ok(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) netif_wake_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) #ifdef CONFIG_NET_POLL_CONTROLLER
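^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) /* Netpoll entry point: with the IRQ disabled, run the interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) * handler and, if it latched any work, process it synchronously by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) * calling the tasklet body directly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) */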
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) static void sc92031_poll_controller(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) struct sc92031_priv *priv = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) const int irq = priv->pdev->irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) disable_irq(irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) if (sc92031_interrupt(irq, dev) != IRQ_NONE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) sc92031_tasklet(&priv->tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) enable_irq(irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) sc92031_ethtool_get_link_ksettings(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) struct ethtool_link_ksettings *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) struct sc92031_priv *priv = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) void __iomem *port_base = priv->port_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) u8 phy_address;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) u32 phy_ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) u16 output_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) u32 supported, advertising;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) spin_lock_bh(&priv->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129)
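^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) /* The PHY address occupies the top five bits of Miicmd1. */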
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) phy_address = ioread32(port_base + Miicmd1) >> 27;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) phy_ctrl = ioread32(port_base + PhyCtrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) output_status = _sc92031_mii_read(port_base, MII_OutputStatus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) _sc92031_mii_scan(port_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) spin_unlock_bh(&priv->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) supported = SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) | SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) advertising = ADVERTISED_TP | ADVERTISED_MII;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) if ((phy_ctrl & (PhyCtrlDux | PhyCtrlSpd100 | PhyCtrlSpd10))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) == (PhyCtrlDux | PhyCtrlSpd100 | PhyCtrlSpd10))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) advertising |= ADVERTISED_Autoneg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) if ((phy_ctrl & PhyCtrlSpd10) == PhyCtrlSpd10)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) advertising |= ADVERTISED_10baseT_Half;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) if ((phy_ctrl & (PhyCtrlSpd10 | PhyCtrlDux))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) == (PhyCtrlSpd10 | PhyCtrlDux))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) advertising |= ADVERTISED_10baseT_Full;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) if ((phy_ctrl & PhyCtrlSpd100) == PhyCtrlSpd100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) advertising |= ADVERTISED_100baseT_Half;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) if ((phy_ctrl & (PhyCtrlSpd100 | PhyCtrlDux))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) == (PhyCtrlSpd100 | PhyCtrlDux))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) advertising |= ADVERTISED_100baseT_Full;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) if (phy_ctrl & PhyCtrlAne)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) advertising |= ADVERTISED_Autoneg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164)
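^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) /* Inferred from this use: bit 1 of the PHY output status register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) * indicates 100 Mb/s, bit 2 full duplex.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) */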
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) cmd->base.speed = (output_status & 0x2) ? SPEED_100 : SPEED_10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) cmd->base.duplex = (output_status & 0x4) ? DUPLEX_FULL : DUPLEX_HALF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) cmd->base.port = PORT_MII;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) cmd->base.phy_address = phy_address;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) cmd->base.autoneg = (phy_ctrl & PhyCtrlAne) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) AUTONEG_ENABLE : AUTONEG_DISABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) supported);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) advertising);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) sc92031_ethtool_set_link_ksettings(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) const struct ethtool_link_ksettings *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) struct sc92031_priv *priv = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) void __iomem *port_base = priv->port_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) u32 speed = cmd->base.speed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) u32 phy_ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) u32 old_phy_ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) u32 advertising;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) ethtool_convert_link_mode_to_legacy_u32(&advertising,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) cmd->link_modes.advertising);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) if (!(speed == SPEED_10 || speed == SPEED_100))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) if (!(cmd->base.duplex == DUPLEX_HALF ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) cmd->base.duplex == DUPLEX_FULL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) if (!(cmd->base.port == PORT_MII))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) return -EINVAL;
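^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) /* Judging by this check, the internal PHY sits at the fixed address 0x1f. */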
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) if (!(cmd->base.phy_address == 0x1f))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) if (!(cmd->base.autoneg == AUTONEG_DISABLE ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) cmd->base.autoneg == AUTONEG_ENABLE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) if (cmd->base.autoneg == AUTONEG_ENABLE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) if (!(advertising & (ADVERTISED_Autoneg
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) | ADVERTISED_100baseT_Full
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) | ADVERTISED_100baseT_Half
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) | ADVERTISED_10baseT_Full
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) | ADVERTISED_10baseT_Half)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) phy_ctrl = PhyCtrlAne;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) // FIXME: I'm not sure what the original code was trying to do
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) if (advertising & ADVERTISED_Autoneg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) phy_ctrl |= PhyCtrlDux | PhyCtrlSpd100 | PhyCtrlSpd10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) if (advertising & ADVERTISED_100baseT_Full)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) phy_ctrl |= PhyCtrlDux | PhyCtrlSpd100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) if (advertising & ADVERTISED_100baseT_Half)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) phy_ctrl |= PhyCtrlSpd100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) if (advertising & ADVERTISED_10baseT_Full)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) phy_ctrl |= PhyCtrlSpd10 | PhyCtrlDux;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) if (advertising & ADVERTISED_10baseT_Half)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) phy_ctrl |= PhyCtrlSpd10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) // FIXME: Whole branch guessed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) phy_ctrl = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) if (speed == SPEED_10)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) phy_ctrl |= PhyCtrlSpd10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) else /* speed == SPEED_100 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) phy_ctrl |= PhyCtrlSpd100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) if (cmd->base.duplex == DUPLEX_FULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) phy_ctrl |= PhyCtrlDux;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) spin_lock_bh(&priv->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) old_phy_ctrl = ioread32(port_base + PhyCtrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) phy_ctrl |= old_phy_ctrl & ~(PhyCtrlAne | PhyCtrlDux
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) | PhyCtrlSpd100 | PhyCtrlSpd10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) if (phy_ctrl != old_phy_ctrl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) iowrite32(phy_ctrl, port_base + PhyCtrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) spin_unlock_bh(&priv->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) static void sc92031_ethtool_get_wol(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) struct ethtool_wolinfo *wolinfo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) struct sc92031_priv *priv = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) void __iomem *port_base = priv->port_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) u32 pm_config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) spin_lock_bh(&priv->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) pm_config = ioread32(port_base + PMConfig);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) spin_unlock_bh(&priv->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) // FIXME: Guessed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) wolinfo->supported = WAKE_PHY | WAKE_MAGIC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) | WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) wolinfo->wolopts = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) if (pm_config & PM_LinkUp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) wolinfo->wolopts |= WAKE_PHY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) if (pm_config & PM_Magic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) wolinfo->wolopts |= WAKE_MAGIC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) if (pm_config & PM_WakeUp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) // FIXME: Guessed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) wolinfo->wolopts |= WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) static int sc92031_ethtool_set_wol(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) struct ethtool_wolinfo *wolinfo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) struct sc92031_priv *priv = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) void __iomem *port_base = priv->port_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) u32 pm_config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) spin_lock_bh(&priv->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) pm_config = ioread32(port_base + PMConfig)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) & ~(PM_LinkUp | PM_Magic | PM_WakeUp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) if (wolinfo->wolopts & WAKE_PHY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) pm_config |= PM_LinkUp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) if (wolinfo->wolopts & WAKE_MAGIC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) pm_config |= PM_Magic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) // FIXME: Guessed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) if (wolinfo->wolopts & (WAKE_UCAST | WAKE_MCAST | WAKE_BCAST))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) pm_config |= PM_WakeUp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) priv->pm_config = pm_config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) iowrite32(pm_config, port_base + PMConfig);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) spin_unlock_bh(&priv->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) static int sc92031_ethtool_nway_reset(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) struct sc92031_priv *priv = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) void __iomem *port_base = priv->port_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) u16 bmcr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) spin_lock_bh(&priv->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) bmcr = _sc92031_mii_read(port_base, MII_BMCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) if (!(bmcr & BMCR_ANENABLE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) _sc92031_mii_write(port_base, MII_BMCR, bmcr | BMCR_ANRESTART);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) _sc92031_mii_scan(port_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) spin_unlock_bh(&priv->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) static const char sc92031_ethtool_stats_strings[SILAN_STATS_NUM][ETH_GSTRING_LEN] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) "tx_timeout",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) "rx_loss",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) static void sc92031_ethtool_get_strings(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) u32 stringset, u8 *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) if (stringset == ETH_SS_STATS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) memcpy(data, sc92031_ethtool_stats_strings,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) SILAN_STATS_NUM * ETH_GSTRING_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) static int sc92031_ethtool_get_sset_count(struct net_device *dev, int sset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) switch (sset) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) case ETH_SS_STATS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) return SILAN_STATS_NUM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) static void sc92031_ethtool_get_ethtool_stats(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) struct ethtool_stats *stats, u64 *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) struct sc92031_priv *priv = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) spin_lock_bh(&priv->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) data[0] = priv->tx_timeouts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) data[1] = priv->rx_loss;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) spin_unlock_bh(&priv->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) static const struct ethtool_ops sc92031_ethtool_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) .get_wol = sc92031_ethtool_get_wol,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) .set_wol = sc92031_ethtool_set_wol,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) .nway_reset = sc92031_ethtool_nway_reset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) .get_link = ethtool_op_get_link,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) .get_strings = sc92031_ethtool_get_strings,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) .get_sset_count = sc92031_ethtool_get_sset_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) .get_ethtool_stats = sc92031_ethtool_get_ethtool_stats,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) .get_link_ksettings = sc92031_ethtool_get_link_ksettings,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) .set_link_ksettings = sc92031_ethtool_set_link_ksettings,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) static const struct net_device_ops sc92031_netdev_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) .ndo_get_stats = sc92031_get_stats,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) .ndo_start_xmit = sc92031_start_xmit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) .ndo_open = sc92031_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) .ndo_stop = sc92031_stop,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) .ndo_set_rx_mode = sc92031_set_multicast_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) .ndo_validate_addr = eth_validate_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) .ndo_set_mac_address = eth_mac_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) .ndo_tx_timeout = sc92031_tx_timeout,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) #ifdef CONFIG_NET_POLL_CONTROLLER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) .ndo_poll_controller = sc92031_poll_controller,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) static int sc92031_probe(struct pci_dev *pdev, const struct pci_device_id *id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) void __iomem *port_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) struct net_device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) struct sc92031_priv *priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) u32 mac0, mac1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) err = pci_enable_device(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) if (unlikely(err < 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) goto out_enable_device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) pci_set_master(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) if (unlikely(err < 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) goto out_set_dma_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) if (unlikely(err < 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) goto out_set_dma_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) err = pci_request_regions(pdev, SC92031_NAME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) if (unlikely(err < 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) goto out_request_regions;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) port_base = pci_iomap(pdev, SC92031_USE_PIO, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) if (unlikely(!port_base)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) err = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) goto out_iomap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) dev = alloc_etherdev(sizeof(struct sc92031_priv));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) if (unlikely(!dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) goto out_alloc_etherdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) pci_set_drvdata(pdev, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) SET_NETDEV_DEV(dev, &pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) /* Checksum offload is faked: skb_copy_and_csum_dev computes the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) * checksum in software while copying the frame to the Tx buffer. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) dev->features = NETIF_F_SG | NETIF_F_HIGHDMA |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) dev->netdev_ops = &sc92031_netdev_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) dev->watchdog_timeo = TX_TIMEOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) dev->ethtool_ops = &sc92031_ethtool_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) priv = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) priv->ndev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) spin_lock_init(&priv->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) priv->port_base = port_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) priv->pdev = pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) tasklet_setup(&priv->tasklet, sc92031_tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) /* Fudge the tasklet disable count so that the call to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) * sc92031_enable_interrupts in sc92031_open will work correctly. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) tasklet_disable_nosync(&priv->tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) /* PCI PM Wakeup */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) iowrite32((~PM_LongWF & ~PM_LWPTN) | PM_Enable, port_base + PMConfig);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458)
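^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) /* MAC0 holds the first four octets of the station address, most
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) * significant byte first; the low 16 bits of the following word hold
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) * the last two.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) */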
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) mac0 = ioread32(port_base + MAC0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) mac1 = ioread32(port_base + MAC0 + 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) dev->dev_addr[0] = mac0 >> 24;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) dev->dev_addr[1] = mac0 >> 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) dev->dev_addr[2] = mac0 >> 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) dev->dev_addr[3] = mac0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) dev->dev_addr[4] = mac1 >> 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) dev->dev_addr[5] = mac1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) err = register_netdev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) goto out_register_netdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) printk(KERN_INFO "%s: SC92031 at 0x%lx, %pM, IRQ %d\n", dev->name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) (long)pci_resource_start(pdev, SC92031_USE_PIO), dev->dev_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) pdev->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) out_register_netdev:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) free_netdev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) out_alloc_etherdev:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) pci_iounmap(pdev, port_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) out_iomap:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) pci_release_regions(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) out_request_regions:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) out_set_dma_mask:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) pci_disable_device(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) out_enable_device:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) static void sc92031_remove(struct pci_dev *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) struct net_device *dev = pci_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) struct sc92031_priv *priv = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) void __iomem *port_base = priv->port_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) unregister_netdev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) free_netdev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) pci_iounmap(pdev, port_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) pci_release_regions(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) pci_disable_device(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) static int __maybe_unused sc92031_suspend(struct device *dev_d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) struct net_device *dev = dev_get_drvdata(dev_d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) struct sc92031_priv *priv = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) if (!netif_running(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) netif_device_detach(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) /* Disable interrupts, stop Tx and Rx. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) sc92031_disable_interrupts(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) spin_lock_bh(&priv->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) _sc92031_disable_tx_rx(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) _sc92031_tx_clear(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) spin_unlock_bh(&priv->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) static int __maybe_unused sc92031_resume(struct device *dev_d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) struct net_device *dev = dev_get_drvdata(dev_d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) struct sc92031_priv *priv = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) if (!netif_running(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) /* Interrupts already disabled by sc92031_suspend */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) spin_lock_bh(&priv->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) _sc92031_reset(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) spin_unlock_bh(&priv->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) sc92031_enable_interrupts(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) netif_device_attach(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) if (netif_carrier_ok(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) netif_wake_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) netif_tx_disable(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) static const struct pci_device_id sc92031_pci_device_id_table[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) { PCI_DEVICE(PCI_VENDOR_ID_SILAN, 0x2031) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) { PCI_DEVICE(PCI_VENDOR_ID_SILAN, 0x8139) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) { PCI_DEVICE(0x1088, 0x2031) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) { 0, }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) MODULE_DEVICE_TABLE(pci, sc92031_pci_device_id_table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) static SIMPLE_DEV_PM_OPS(sc92031_pm_ops, sc92031_suspend, sc92031_resume);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) static struct pci_driver sc92031_pci_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) .name = SC92031_NAME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) .id_table = sc92031_pci_device_id_table,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) .probe = sc92031_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) .remove = sc92031_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) .driver.pm = &sc92031_pm_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) module_pci_driver(sc92031_pci_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) MODULE_LICENSE("GPL");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) MODULE_AUTHOR("Cesar Eduardo Barros <cesarb@cesarb.net>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) MODULE_DESCRIPTION("Silan SC92031 PCI Fast Ethernet Adapter driver");