^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0-or-later
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) /* Advanced Micro Devices Inc. AMD8111E Linux Network Driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) * Copyright (C) 2004 Advanced Micro Devices
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) * Copyright 2001,2002 Jeff Garzik <jgarzik@mandrakesoft.com> [ 8139cp.c,tg3.c ]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) * Copyright (C) 2001, 2002 David S. Miller (davem@redhat.com)[ tg3.c]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) * Copyright 1996-1999 Thomas Bogendoerfer [ pcnet32.c ]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) * Derived from the lance driver written 1993,1994,1995 by Donald Becker.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) * Copyright 1993 United States Government as represented by the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) * Director, National Security Agency.[ pcnet32.c ]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) * Carsten Langgaard, carstenl@mips.com [ pcnet32.c ]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) Module Name:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) amd8111e.c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) Abstract:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) AMD8111 based 10/100 Ethernet Controller Driver.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) Environment:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) Kernel Mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) Revision History:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) 3.0.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) Initial Revision.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) 3.0.1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) 1. Dynamic interrupt coalescing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) 2. Removed prev_stats.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) 3. MII support.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) 4. Dynamic IPG support
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) 3.0.2 05/29/2003
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) 1. Bug fix: Fixed failure to send jumbo packets larger than 4k.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) 2. Bug fix: Fixed VLAN support failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) 3. Bug fix: Fixed receive interrupt coalescing bug.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) 4. Dynamic IPG support is disabled by default.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) 3.0.3 06/05/2003
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) 1. Bug fix: Fixed failure to close the interface if SMP is enabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) 3.0.4 12/09/2003
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) 1. Added set_mac_address routine for bonding driver support.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) 2. Tested the driver for bonding support
	3. Bug fix: Fixed mismatch in actual receive buffer length and length
	   indicated to the h/w.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) 4. Modified amd8111e_rx() routine to receive all the received packets
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) in the first interrupt.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) 5. Bug fix: Corrected rx_errors reported in get_stats() function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) 3.0.5 03/22/2004
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) 1. Added NAPI support
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) #include <linux/types.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) #include <linux/compiler.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) #include <linux/delay.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) #include <linux/interrupt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) #include <linux/ioport.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) #include <linux/pci.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) #include <linux/netdevice.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) #include <linux/etherdevice.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) #include <linux/skbuff.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) #include <linux/ethtool.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) #include <linux/mii.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) #include <linux/if_vlan.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) #include <linux/ctype.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) #include <linux/crc32.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) #include <linux/dma-mapping.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) #include <asm/io.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) #include <asm/byteorder.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) #include <linux/uaccess.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) #if IS_ENABLED(CONFIG_VLAN_8021Q)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) #define AMD8111E_VLAN_TAG_USED 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) #define AMD8111E_VLAN_TAG_USED 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) #include "amd8111e.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) #define MODULE_NAME "amd8111e"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) MODULE_AUTHOR("Advanced Micro Devices, Inc.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) MODULE_DESCRIPTION("AMD8111 based 10/100 Ethernet Controller.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) MODULE_LICENSE("GPL");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) module_param_array(speed_duplex, int, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) MODULE_PARM_DESC(speed_duplex, "Set device speed and duplex modes, 0: Auto Negotiate, 1: 10Mbps Half Duplex, 2: 10Mbps Full Duplex, 3: 100Mbps Half Duplex, 4: 100Mbps Full Duplex");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) module_param_array(coalesce, bool, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) MODULE_PARM_DESC(coalesce, "Enable or Disable interrupt coalescing, 1: Enable, 0: Disable");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) module_param_array(dynamic_ipg, bool, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) MODULE_PARM_DESC(dynamic_ipg, "Enable or Disable dynamic IPG, 1: Enable, 0: Disable");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) /* This function will read the PHY registers. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) static int amd8111e_read_phy(struct amd8111e_priv *lp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) int phy_id, int reg, u32 *val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) void __iomem *mmio = lp->mmio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) unsigned int reg_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) unsigned int repeat= REPEAT_CNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) reg_val = readl(mmio + PHY_ACCESS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) while (reg_val & PHY_CMD_ACTIVE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) reg_val = readl( mmio + PHY_ACCESS );
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) writel( PHY_RD_CMD | ((phy_id & 0x1f) << 21) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) ((reg & 0x1f) << 16), mmio +PHY_ACCESS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) do{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) reg_val = readl(mmio + PHY_ACCESS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) udelay(30); /* It takes 30 us to read/write data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) } while (--repeat && (reg_val & PHY_CMD_ACTIVE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) if(reg_val & PHY_RD_ERR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) goto err_phy_read;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) *val = reg_val & 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) err_phy_read:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) *val = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) /* This function will write into PHY registers. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) static int amd8111e_write_phy(struct amd8111e_priv *lp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) int phy_id, int reg, u32 val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) unsigned int repeat = REPEAT_CNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) void __iomem *mmio = lp->mmio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) unsigned int reg_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) reg_val = readl(mmio + PHY_ACCESS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) while (reg_val & PHY_CMD_ACTIVE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) reg_val = readl( mmio + PHY_ACCESS );
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) writel( PHY_WR_CMD | ((phy_id & 0x1f) << 21) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) ((reg & 0x1f) << 16)|val, mmio + PHY_ACCESS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) do{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) reg_val = readl(mmio + PHY_ACCESS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) udelay(30); /* It takes 30 us to read/write the data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) } while (--repeat && (reg_val & PHY_CMD_ACTIVE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) if(reg_val & PHY_RD_ERR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) goto err_phy_write;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) err_phy_write:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) /* This is the mii register read function provided to the mii interface. */
/* This is the mii register read function provided to the mii interface.
 *
 * Returns the 16-bit register value.  On a PHY read error
 * amd8111e_read_phy() zeroes reg_val, so 0 is reported to the
 * mii layer in that case.
 */
static int amd8111e_mdio_read(struct net_device *dev, int phy_id, int reg_num)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	unsigned int reg_val;

	/* Fix: the argument was a mis-encoded '&reg_val' (appeared as a
	 * garbled registered-trademark character), which does not compile.
	 */
	amd8111e_read_phy(lp, phy_id, reg_num, &reg_val);
	return reg_val;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) /* This is the mii register write function provided to the mii interface. */
/* This is the mii register write function provided to the mii interface. */
static void amd8111e_mdio_write(struct net_device *dev,
				int phy_id, int reg_num, int val)
{
	amd8111e_write_phy(netdev_priv(dev), phy_id, reg_num, val);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) /* This function will set PHY speed. During initialization sets
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) * the original speed to 100 full
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) static void amd8111e_set_ext_phy(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) struct amd8111e_priv *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) u32 bmcr,advert,tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) /* Determine mii register values to set the speed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) advert = amd8111e_mdio_read(dev, lp->ext_phy_addr, MII_ADVERTISE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) tmp = advert & ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) switch (lp->ext_phy_option){
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) case SPEED_AUTONEG: /* advertise all values */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) tmp |= ( ADVERTISE_10HALF|ADVERTISE_10FULL|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) ADVERTISE_100HALF|ADVERTISE_100FULL) ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) case SPEED10_HALF:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) tmp |= ADVERTISE_10HALF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) case SPEED10_FULL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) tmp |= ADVERTISE_10FULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) case SPEED100_HALF:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) tmp |= ADVERTISE_100HALF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) case SPEED100_FULL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) tmp |= ADVERTISE_100FULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) if(advert != tmp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) amd8111e_mdio_write(dev, lp->ext_phy_addr, MII_ADVERTISE, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) /* Restart auto negotiation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) bmcr = amd8111e_mdio_read(dev, lp->ext_phy_addr, MII_BMCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) amd8111e_mdio_write(dev, lp->ext_phy_addr, MII_BMCR, bmcr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) /* This function will unmap skb->data space and will free
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) * all transmit and receive skbuffs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) static int amd8111e_free_skbs(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) struct amd8111e_priv *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) struct sk_buff *rx_skbuff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) /* Freeing transmit skbs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) for(i = 0; i < NUM_TX_BUFFERS; i++){
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) if(lp->tx_skbuff[i]){
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) dma_unmap_single(&lp->pci_dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) lp->tx_dma_addr[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) lp->tx_skbuff[i]->len, DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) dev_kfree_skb (lp->tx_skbuff[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) lp->tx_skbuff[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) lp->tx_dma_addr[i] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) /* Freeing previously allocated receive buffers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) for (i = 0; i < NUM_RX_BUFFERS; i++){
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) rx_skbuff = lp->rx_skbuff[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) if(rx_skbuff != NULL){
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) dma_unmap_single(&lp->pci_dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) lp->rx_dma_addr[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) lp->rx_buff_len - 2, DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) dev_kfree_skb(lp->rx_skbuff[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) lp->rx_skbuff[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) lp->rx_dma_addr[i] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) /* This will set the receive buffer length corresponding
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) * to the mtu size of networkinterface.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) static inline void amd8111e_set_rx_buff_len(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) struct amd8111e_priv *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) unsigned int mtu = dev->mtu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) if (mtu > ETH_DATA_LEN){
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) /* MTU + ethernet header + FCS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) * + optional VLAN tag + skb reserve space 2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) lp->rx_buff_len = mtu + ETH_HLEN + 10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) lp->options |= OPTION_JUMBO_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) } else{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) lp->rx_buff_len = PKT_BUFF_SZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) lp->options &= ~OPTION_JUMBO_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) /* This function will free all the previously allocated buffers,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) * determine new receive buffer length and will allocate new receive buffers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) * This function also allocates and initializes both the transmitter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) * and receive hardware descriptors.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) */
/* This function will free all the previously allocated buffers,
 * determine new receive buffer length and will allocate new receive buffers.
 * This function also allocates and initializes both the transmitter
 * and receive hardware descriptors.
 *
 * Returns 0 on success or -ENOMEM when ring or skb allocation fails.
 *
 * NOTE(review): on the skb-allocation failure path the goto labels
 * fall through intentionally (rx ring freed, then tx ring).  When
 * lp->opened is set, that path frees rings that were allocated by an
 * earlier call without clearing lp->tx_ring/lp->rx_ring — verify the
 * callers never reuse those pointers after a failure here.
 */
static int amd8111e_init_ring(struct net_device *dev)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	int i;

	/* Reset all producer/consumer indices for both rings. */
	lp->rx_idx = lp->tx_idx = 0;
	lp->tx_complete_idx = 0;
	lp->tx_ring_idx = 0;


	if(lp->opened)
		/* Free previously allocated transmit and receive skbs */
		amd8111e_free_skbs(dev);

	else{
		/* First open: allocate the tx and rx descriptor rings. */
		lp->tx_ring = dma_alloc_coherent(&lp->pci_dev->dev,
			sizeof(struct amd8111e_tx_dr) * NUM_TX_RING_DR,
			&lp->tx_ring_dma_addr, GFP_ATOMIC);
		if (!lp->tx_ring)
			goto err_no_mem;

		lp->rx_ring = dma_alloc_coherent(&lp->pci_dev->dev,
			sizeof(struct amd8111e_rx_dr) * NUM_RX_RING_DR,
			&lp->rx_ring_dma_addr, GFP_ATOMIC);
		if (!lp->rx_ring)
			goto err_free_tx_ring;
	}

	/* Set new receive buff size (depends on the current MTU). */
	amd8111e_set_rx_buff_len(dev);

	/* Allocating receive skbs */
	for (i = 0; i < NUM_RX_BUFFERS; i++) {

		lp->rx_skbuff[i] = netdev_alloc_skb(dev, lp->rx_buff_len);
		if (!lp->rx_skbuff[i]) {
			/* Release previously allocated skbs */
			for(--i; i >= 0 ;i--)
				dev_kfree_skb(lp->rx_skbuff[i]);
			goto err_free_rx_ring;
		}
		/* Reserve 2 bytes so the IP header ends up aligned. */
		skb_reserve(lp->rx_skbuff[i],2);
	}
	/* Initializing receive descriptors */
	for (i = 0; i < NUM_RX_BUFFERS; i++) {
		lp->rx_dma_addr[i] = dma_map_single(&lp->pci_dev->dev,
						    lp->rx_skbuff[i]->data,
						    lp->rx_buff_len - 2,
						    DMA_FROM_DEVICE);

		lp->rx_ring[i].buff_phy_addr = cpu_to_le32(lp->rx_dma_addr[i]);
		lp->rx_ring[i].buff_count = cpu_to_le16(lp->rx_buff_len-2);
		/* Write barrier: descriptor fields must be visible before
		 * ownership is handed to the NIC via OWN_BIT.
		 */
		wmb();
		lp->rx_ring[i].rx_flags = cpu_to_le16(OWN_BIT);
	}

	/* Initializing transmit descriptors */
	for (i = 0; i < NUM_TX_RING_DR; i++) {
		lp->tx_ring[i].buff_phy_addr = 0;
		lp->tx_ring[i].tx_flags = 0;
		lp->tx_ring[i].buff_count = 0;
	}

	return 0;

err_free_rx_ring:

	dma_free_coherent(&lp->pci_dev->dev,
			  sizeof(struct amd8111e_rx_dr) * NUM_RX_RING_DR,
			  lp->rx_ring, lp->rx_ring_dma_addr);

err_free_tx_ring:

	dma_free_coherent(&lp->pci_dev->dev,
			  sizeof(struct amd8111e_tx_dr) * NUM_TX_RING_DR,
			  lp->tx_ring, lp->tx_ring_dma_addr);

err_no_mem:
	return -ENOMEM;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) /* This function will set the interrupt coalescing according
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) * to the input arguments
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) static int amd8111e_set_coalesce(struct net_device *dev, enum coal_mode cmod)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) unsigned int timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) unsigned int event_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) struct amd8111e_priv *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) void __iomem *mmio = lp->mmio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) struct amd8111e_coalesce_conf *coal_conf = &lp->coal_conf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) switch(cmod)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) case RX_INTR_COAL :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) timeout = coal_conf->rx_timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) event_count = coal_conf->rx_event_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) if( timeout > MAX_TIMEOUT ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) event_count > MAX_EVENT_COUNT )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) timeout = timeout * DELAY_TIMER_CONV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) writel(VAL0|STINTEN, mmio+INTEN0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) writel((u32)DLY_INT_A_R0|( event_count<< 16 )|timeout,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) mmio+DLY_INT_A);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) case TX_INTR_COAL :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) timeout = coal_conf->tx_timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) event_count = coal_conf->tx_event_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) if( timeout > MAX_TIMEOUT ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) event_count > MAX_EVENT_COUNT )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) timeout = timeout * DELAY_TIMER_CONV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) writel(VAL0|STINTEN,mmio+INTEN0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) writel((u32)DLY_INT_B_T0|( event_count<< 16 )|timeout,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) mmio+DLY_INT_B);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) case DISABLE_COAL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) writel(0,mmio+STVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) writel(STINTEN, mmio+INTEN0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) writel(0, mmio +DLY_INT_B);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) writel(0, mmio+DLY_INT_A);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) case ENABLE_COAL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) /* Start the timer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) writel((u32)SOFT_TIMER_FREQ, mmio+STVAL); /* 0.5 sec */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) writel(VAL0|STINTEN, mmio+INTEN0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) /* This function initializes the device registers and starts the device. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) static int amd8111e_restart(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) struct amd8111e_priv *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) void __iomem *mmio = lp->mmio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) int i,reg_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) /* stop the chip */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) writel(RUN, mmio + CMD0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) if(amd8111e_init_ring(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) /* enable the port manager and set auto negotiation always */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) writel((u32) VAL1|EN_PMGR, mmio + CMD3 );
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) writel((u32)XPHYANE|XPHYRST , mmio + CTRL2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) amd8111e_set_ext_phy(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) /* set control registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) reg_val = readl(mmio + CTRL1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) reg_val &= ~XMTSP_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) writel( reg_val| XMTSP_128 | CACHE_ALIGN, mmio + CTRL1 );
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) /* enable interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) writel( APINT5EN | APINT4EN | APINT3EN | APINT2EN | APINT1EN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) APINT0EN | MIIPDTINTEN | MCCIINTEN | MCCINTEN | MREINTEN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) SPNDINTEN | MPINTEN | SINTEN | STINTEN, mmio + INTEN0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) writel(VAL3 | LCINTEN | VAL1 | TINTEN0 | VAL0 | RINTEN0, mmio + INTEN0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) /* initialize tx and rx ring base addresses */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) writel((u32)lp->tx_ring_dma_addr,mmio + XMT_RING_BASE_ADDR0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) writel((u32)lp->rx_ring_dma_addr,mmio+ RCV_RING_BASE_ADDR0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) writew((u32)NUM_TX_RING_DR, mmio + XMT_RING_LEN0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) writew((u16)NUM_RX_RING_DR, mmio + RCV_RING_LEN0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) /* set default IPG to 96 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) writew((u32)DEFAULT_IPG,mmio+IPG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) writew((u32)(DEFAULT_IPG-IFS1_DELTA), mmio + IFS1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) if(lp->options & OPTION_JUMBO_ENABLE){
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) writel((u32)VAL2|JUMBO, mmio + CMD3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) /* Reset REX_UFLO */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) writel( REX_UFLO, mmio + CMD2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) /* Should not set REX_UFLO for jumbo frames */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) writel( VAL0 | APAD_XMT|REX_RTRY , mmio + CMD2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) }else{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) writel( VAL0 | APAD_XMT | REX_RTRY|REX_UFLO, mmio + CMD2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) writel((u32)JUMBO, mmio + CMD3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) #if AMD8111E_VLAN_TAG_USED
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) writel((u32) VAL2|VSIZE|VL_TAG_DEL, mmio + CMD3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) writel( VAL0 | APAD_XMT | REX_RTRY, mmio + CMD2 );
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) /* Setting the MAC address to the device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) for (i = 0; i < ETH_ALEN; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) writeb( dev->dev_addr[i], mmio + PADR + i );
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) /* Enable interrupt coalesce */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) if(lp->options & OPTION_INTR_COAL_ENABLE){
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) netdev_info(dev, "Interrupt Coalescing Enabled.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) amd8111e_set_coalesce(dev,ENABLE_COAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) /* set RUN bit to start the chip */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) writel(VAL2 | RDMD0, mmio + CMD0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) writel(VAL0 | INTREN | RUN, mmio + CMD0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) /* To avoid PCI posting bug */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) readl(mmio+CMD0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) /* This function clears necessary the device registers. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) static void amd8111e_init_hw_default(struct amd8111e_priv *lp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) unsigned int reg_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) unsigned int logic_filter[2] ={0,};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) void __iomem *mmio = lp->mmio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) /* stop the chip */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) writel(RUN, mmio + CMD0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) /* AUTOPOLL0 Register *//*TBD default value is 8100 in FPS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) writew( 0x8100 | lp->ext_phy_addr, mmio + AUTOPOLL0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) /* Clear RCV_RING_BASE_ADDR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) writel(0, mmio + RCV_RING_BASE_ADDR0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) /* Clear XMT_RING_BASE_ADDR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) writel(0, mmio + XMT_RING_BASE_ADDR0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) writel(0, mmio + XMT_RING_BASE_ADDR1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) writel(0, mmio + XMT_RING_BASE_ADDR2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) writel(0, mmio + XMT_RING_BASE_ADDR3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) /* Clear CMD0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) writel(CMD0_CLEAR,mmio + CMD0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) /* Clear CMD2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) writel(CMD2_CLEAR, mmio +CMD2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) /* Clear CMD7 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) writel(CMD7_CLEAR , mmio + CMD7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) /* Clear DLY_INT_A and DLY_INT_B */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) writel(0x0, mmio + DLY_INT_A);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) writel(0x0, mmio + DLY_INT_B);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) /* Clear FLOW_CONTROL */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) writel(0x0, mmio + FLOW_CONTROL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) /* Clear INT0 write 1 to clear register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) reg_val = readl(mmio + INT0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) writel(reg_val, mmio + INT0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) /* Clear STVAL */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) writel(0x0, mmio + STVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) /* Clear INTEN0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) writel( INTEN0_CLEAR, mmio + INTEN0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) /* Clear LADRF */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) writel(0x0 , mmio + LADRF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) /* Set SRAM_SIZE & SRAM_BOUNDARY registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) writel( 0x80010,mmio + SRAM_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) /* Clear RCV_RING0_LEN */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) writel(0x0, mmio + RCV_RING_LEN0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) /* Clear XMT_RING0/1/2/3_LEN */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) writel(0x0, mmio + XMT_RING_LEN0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) writel(0x0, mmio + XMT_RING_LEN1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) writel(0x0, mmio + XMT_RING_LEN2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) writel(0x0, mmio + XMT_RING_LEN3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) /* Clear XMT_RING_LIMIT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) writel(0x0, mmio + XMT_RING_LIMIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) /* Clear MIB */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) writew(MIB_CLEAR, mmio + MIB_ADDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) /* Clear LARF */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) amd8111e_writeq(*(u64 *)logic_filter, mmio + LADRF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) /* SRAM_SIZE register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) reg_val = readl(mmio + SRAM_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) if(lp->options & OPTION_JUMBO_ENABLE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) writel( VAL2|JUMBO, mmio + CMD3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) #if AMD8111E_VLAN_TAG_USED
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) writel(VAL2|VSIZE|VL_TAG_DEL, mmio + CMD3 );
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) /* Set default value to CTRL1 Register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) writel(CTRL1_DEFAULT, mmio + CTRL1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) /* To avoid PCI posting bug */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) readl(mmio + CMD2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) /* This function disables the interrupt and clears all the pending
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) * interrupts in INT0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) static void amd8111e_disable_interrupt(struct amd8111e_priv *lp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) u32 intr0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) /* Disable interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) writel(INTREN, lp->mmio + CMD0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) /* Clear INT0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) intr0 = readl(lp->mmio + INT0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) writel(intr0, lp->mmio + INT0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) /* To avoid PCI posting bug */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) readl(lp->mmio + INT0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) /* This function stops the chip. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) static void amd8111e_stop_chip(struct amd8111e_priv *lp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) writel(RUN, lp->mmio + CMD0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) /* To avoid PCI posting bug */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) readl(lp->mmio + CMD0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) /* This function frees the transmiter and receiver descriptor rings. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) static void amd8111e_free_ring(struct amd8111e_priv *lp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) /* Free transmit and receive descriptor rings */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) if(lp->rx_ring){
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) dma_free_coherent(&lp->pci_dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) sizeof(struct amd8111e_rx_dr) * NUM_RX_RING_DR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) lp->rx_ring, lp->rx_ring_dma_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) lp->rx_ring = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) if(lp->tx_ring){
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) dma_free_coherent(&lp->pci_dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) sizeof(struct amd8111e_tx_dr) * NUM_TX_RING_DR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) lp->tx_ring, lp->tx_ring_dma_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) lp->tx_ring = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) /* This function will free all the transmit skbs that are actually
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) * transmitted by the device. It will check the ownership of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) * skb before freeing the skb.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) static int amd8111e_tx(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) struct amd8111e_priv *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) int tx_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) int status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) /* Complete all the transmit packet */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) while (lp->tx_complete_idx != lp->tx_idx){
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) tx_index = lp->tx_complete_idx & TX_RING_DR_MOD_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) status = le16_to_cpu(lp->tx_ring[tx_index].tx_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) if(status & OWN_BIT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) break; /* It still hasn't been Txed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) lp->tx_ring[tx_index].buff_phy_addr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) /* We must free the original skb */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) if (lp->tx_skbuff[tx_index]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) dma_unmap_single(&lp->pci_dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) lp->tx_dma_addr[tx_index],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) lp->tx_skbuff[tx_index]->len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) dev_consume_skb_irq(lp->tx_skbuff[tx_index]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) lp->tx_skbuff[tx_index] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) lp->tx_dma_addr[tx_index] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) lp->tx_complete_idx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) /*COAL update tx coalescing parameters */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) lp->coal_conf.tx_packets++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) lp->coal_conf.tx_bytes +=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) le16_to_cpu(lp->tx_ring[tx_index].buff_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) if (netif_queue_stopped(dev) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) lp->tx_complete_idx > lp->tx_idx - NUM_TX_BUFFERS +2){
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) /* The ring is no longer full, clear tbusy. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) /* lp->tx_full = 0; */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) netif_wake_queue (dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) /* This function handles the driver receive operation in polling mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) static int amd8111e_rx_poll(struct napi_struct *napi, int budget)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) struct amd8111e_priv *lp = container_of(napi, struct amd8111e_priv, napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) struct net_device *dev = lp->amd8111e_net_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) int rx_index = lp->rx_idx & RX_RING_DR_MOD_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) void __iomem *mmio = lp->mmio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) struct sk_buff *skb,*new_skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) int min_pkt_len, status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) int num_rx_pkt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) short pkt_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) #if AMD8111E_VLAN_TAG_USED
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) short vtag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) while (num_rx_pkt < budget) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) status = le16_to_cpu(lp->rx_ring[rx_index].rx_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) if (status & OWN_BIT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) /* There is a tricky error noted by John Murphy,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) * <murf@perftech.com> to Russ Nelson: Even with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) * full-sized * buffers it's possible for a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) * jabber packet to use two buffers, with only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) * the last correctly noting the error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) if (status & ERR_BIT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) /* resetting flags */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) goto err_next_pkt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) /* check for STP and ENP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) if (!((status & STP_BIT) && (status & ENP_BIT))){
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) /* resetting flags */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) goto err_next_pkt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) pkt_len = le16_to_cpu(lp->rx_ring[rx_index].msg_count) - 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) #if AMD8111E_VLAN_TAG_USED
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) vtag = status & TT_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) /* MAC will strip vlan tag */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) if (vtag != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) min_pkt_len = MIN_PKT_LEN - 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) min_pkt_len = MIN_PKT_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) if (pkt_len < min_pkt_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) lp->drv_rx_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) goto err_next_pkt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) new_skb = netdev_alloc_skb(dev, lp->rx_buff_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) if (!new_skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) /* if allocation fail,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) * ignore that pkt and go to next one
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) lp->drv_rx_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) goto err_next_pkt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) skb_reserve(new_skb, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) skb = lp->rx_skbuff[rx_index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) dma_unmap_single(&lp->pci_dev->dev, lp->rx_dma_addr[rx_index],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) lp->rx_buff_len - 2, DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) skb_put(skb, pkt_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) lp->rx_skbuff[rx_index] = new_skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) lp->rx_dma_addr[rx_index] = dma_map_single(&lp->pci_dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) new_skb->data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) lp->rx_buff_len - 2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) skb->protocol = eth_type_trans(skb, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) #if AMD8111E_VLAN_TAG_USED
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) if (vtag == TT_VLAN_TAGGED){
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) u16 vlan_tag = le16_to_cpu(lp->rx_ring[rx_index].tag_ctrl_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) napi_gro_receive(napi, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) /* COAL update rx coalescing parameters */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) lp->coal_conf.rx_packets++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) lp->coal_conf.rx_bytes += pkt_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) num_rx_pkt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) err_next_pkt:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) lp->rx_ring[rx_index].buff_phy_addr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) = cpu_to_le32(lp->rx_dma_addr[rx_index]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) lp->rx_ring[rx_index].buff_count =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) cpu_to_le16(lp->rx_buff_len-2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) lp->rx_ring[rx_index].rx_flags |= cpu_to_le16(OWN_BIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) rx_index = (++lp->rx_idx) & RX_RING_DR_MOD_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) if (num_rx_pkt < budget && napi_complete_done(napi, num_rx_pkt)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) /* Receive descriptor is empty now */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) spin_lock_irqsave(&lp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) writel(VAL0|RINTEN0, mmio + INTEN0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) writel(VAL2 | RDMD0, mmio + CMD0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) spin_unlock_irqrestore(&lp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) return num_rx_pkt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) /* This function will indicate the link status to the kernel. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) static int amd8111e_link_change(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) struct amd8111e_priv *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) int status0,speed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) /* read the link change */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) status0 = readl(lp->mmio + STAT0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) if(status0 & LINK_STATS){
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) if(status0 & AUTONEG_COMPLETE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) lp->link_config.autoneg = AUTONEG_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) lp->link_config.autoneg = AUTONEG_DISABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) if(status0 & FULL_DPLX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) lp->link_config.duplex = DUPLEX_FULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) lp->link_config.duplex = DUPLEX_HALF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) speed = (status0 & SPEED_MASK) >> 7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) if(speed == PHY_SPEED_10)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) lp->link_config.speed = SPEED_10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) else if(speed == PHY_SPEED_100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) lp->link_config.speed = SPEED_100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) netdev_info(dev, "Link is Up. Speed is %s Mbps %s Duplex\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) (lp->link_config.speed == SPEED_100) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) "100" : "10",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) (lp->link_config.duplex == DUPLEX_FULL) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) "Full" : "Half");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) netif_carrier_on(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) else{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) lp->link_config.speed = SPEED_INVALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) lp->link_config.duplex = DUPLEX_INVALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) lp->link_config.autoneg = AUTONEG_INVALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) netdev_info(dev, "Link is Down.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) netif_carrier_off(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) /* This function reads the mib counters. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) static int amd8111e_read_mib(void __iomem *mmio, u8 MIB_COUNTER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) unsigned int status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) unsigned int data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) unsigned int repeat = REPEAT_CNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) writew( MIB_RD_CMD | MIB_COUNTER, mmio + MIB_ADDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) status = readw(mmio + MIB_ADDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) udelay(2); /* controller takes MAX 2 us to get mib data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) while (--repeat && (status & MIB_CMD_ACTIVE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) data = readl(mmio + MIB_DATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) return data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) /* This function reads the mib registers and returns the hardware statistics.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) * It updates previous internal driver statistics with new values.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) static struct net_device_stats *amd8111e_get_stats(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) struct amd8111e_priv *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) void __iomem *mmio = lp->mmio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) struct net_device_stats *new_stats = &dev->stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) if (!lp->opened)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) return new_stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) spin_lock_irqsave (&lp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) /* stats.rx_packets */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) new_stats->rx_packets = amd8111e_read_mib(mmio, rcv_broadcast_pkts)+
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) amd8111e_read_mib(mmio, rcv_multicast_pkts)+
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) amd8111e_read_mib(mmio, rcv_unicast_pkts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) /* stats.tx_packets */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) new_stats->tx_packets = amd8111e_read_mib(mmio, xmt_packets);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) /*stats.rx_bytes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) new_stats->rx_bytes = amd8111e_read_mib(mmio, rcv_octets);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) /* stats.tx_bytes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) new_stats->tx_bytes = amd8111e_read_mib(mmio, xmt_octets);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) /* stats.rx_errors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) /* hw errors + errors driver reported */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) new_stats->rx_errors = amd8111e_read_mib(mmio, rcv_undersize_pkts)+
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) amd8111e_read_mib(mmio, rcv_fragments)+
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) amd8111e_read_mib(mmio, rcv_jabbers)+
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) amd8111e_read_mib(mmio, rcv_alignment_errors)+
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) amd8111e_read_mib(mmio, rcv_fcs_errors)+
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) amd8111e_read_mib(mmio, rcv_miss_pkts)+
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) lp->drv_rx_errors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) /* stats.tx_errors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) new_stats->tx_errors = amd8111e_read_mib(mmio, xmt_underrun_pkts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) /* stats.rx_dropped*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) new_stats->rx_dropped = amd8111e_read_mib(mmio, rcv_miss_pkts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) /* stats.tx_dropped*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) new_stats->tx_dropped = amd8111e_read_mib(mmio, xmt_underrun_pkts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) /* stats.multicast*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) new_stats->multicast = amd8111e_read_mib(mmio, rcv_multicast_pkts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) /* stats.collisions*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) new_stats->collisions = amd8111e_read_mib(mmio, xmt_collisions);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) /* stats.rx_length_errors*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) new_stats->rx_length_errors =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) amd8111e_read_mib(mmio, rcv_undersize_pkts)+
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) amd8111e_read_mib(mmio, rcv_oversize_pkts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) /* stats.rx_over_errors*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) new_stats->rx_over_errors = amd8111e_read_mib(mmio, rcv_miss_pkts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) /* stats.rx_crc_errors*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) new_stats->rx_crc_errors = amd8111e_read_mib(mmio, rcv_fcs_errors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) /* stats.rx_frame_errors*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) new_stats->rx_frame_errors =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) amd8111e_read_mib(mmio, rcv_alignment_errors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) /* stats.rx_fifo_errors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) new_stats->rx_fifo_errors = amd8111e_read_mib(mmio, rcv_miss_pkts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) /* stats.rx_missed_errors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) new_stats->rx_missed_errors = amd8111e_read_mib(mmio, rcv_miss_pkts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) /* stats.tx_aborted_errors*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) new_stats->tx_aborted_errors =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) amd8111e_read_mib(mmio, xmt_excessive_collision);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) /* stats.tx_carrier_errors*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) new_stats->tx_carrier_errors =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) amd8111e_read_mib(mmio, xmt_loss_carrier);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) /* stats.tx_fifo_errors*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) new_stats->tx_fifo_errors = amd8111e_read_mib(mmio, xmt_underrun_pkts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) /* stats.tx_window_errors*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) new_stats->tx_window_errors =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) amd8111e_read_mib(mmio, xmt_late_collision);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) /* Reset the mibs for collecting new statistics */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) /* writew(MIB_CLEAR, mmio + MIB_ADDR);*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) spin_unlock_irqrestore (&lp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) return new_stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) /* This function recalculate the interrupt coalescing mode on every interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) * according to the datarate and the packet rate.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) static int amd8111e_calc_coalesce(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) struct amd8111e_priv *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) struct amd8111e_coalesce_conf *coal_conf = &lp->coal_conf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) int tx_pkt_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) int rx_pkt_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) int tx_data_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) int rx_data_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) int rx_pkt_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) int tx_pkt_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) tx_pkt_rate = coal_conf->tx_packets - coal_conf->tx_prev_packets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) coal_conf->tx_prev_packets = coal_conf->tx_packets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) tx_data_rate = coal_conf->tx_bytes - coal_conf->tx_prev_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) coal_conf->tx_prev_bytes = coal_conf->tx_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) rx_pkt_rate = coal_conf->rx_packets - coal_conf->rx_prev_packets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) coal_conf->rx_prev_packets = coal_conf->rx_packets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) rx_data_rate = coal_conf->rx_bytes - coal_conf->rx_prev_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) coal_conf->rx_prev_bytes = coal_conf->rx_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) if(rx_pkt_rate < 800){
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) if(coal_conf->rx_coal_type != NO_COALESCE){
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) coal_conf->rx_timeout = 0x0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) coal_conf->rx_event_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) amd8111e_set_coalesce(dev,RX_INTR_COAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) coal_conf->rx_coal_type = NO_COALESCE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) else{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) rx_pkt_size = rx_data_rate/rx_pkt_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) if (rx_pkt_size < 128){
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) if(coal_conf->rx_coal_type != NO_COALESCE){
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) coal_conf->rx_timeout = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) coal_conf->rx_event_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) amd8111e_set_coalesce(dev,RX_INTR_COAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) coal_conf->rx_coal_type = NO_COALESCE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) else if ( (rx_pkt_size >= 128) && (rx_pkt_size < 512) ){
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) if(coal_conf->rx_coal_type != LOW_COALESCE){
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) coal_conf->rx_timeout = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) coal_conf->rx_event_count = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) amd8111e_set_coalesce(dev,RX_INTR_COAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) coal_conf->rx_coal_type = LOW_COALESCE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) else if ((rx_pkt_size >= 512) && (rx_pkt_size < 1024)){
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) if(coal_conf->rx_coal_type != MEDIUM_COALESCE){
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) coal_conf->rx_timeout = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) coal_conf->rx_event_count = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) amd8111e_set_coalesce(dev,RX_INTR_COAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) coal_conf->rx_coal_type = MEDIUM_COALESCE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) else if(rx_pkt_size >= 1024){
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) if(coal_conf->rx_coal_type != HIGH_COALESCE){
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) coal_conf->rx_timeout = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) coal_conf->rx_event_count = 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) amd8111e_set_coalesce(dev,RX_INTR_COAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) coal_conf->rx_coal_type = HIGH_COALESCE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) /* NOW FOR TX INTR COALESC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) if(tx_pkt_rate < 800){
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) if(coal_conf->tx_coal_type != NO_COALESCE){
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) coal_conf->tx_timeout = 0x0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) coal_conf->tx_event_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) amd8111e_set_coalesce(dev,TX_INTR_COAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) coal_conf->tx_coal_type = NO_COALESCE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) else{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) tx_pkt_size = tx_data_rate/tx_pkt_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) if (tx_pkt_size < 128){
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) if(coal_conf->tx_coal_type != NO_COALESCE){
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) coal_conf->tx_timeout = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) coal_conf->tx_event_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) amd8111e_set_coalesce(dev,TX_INTR_COAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) coal_conf->tx_coal_type = NO_COALESCE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) else if ( (tx_pkt_size >= 128) && (tx_pkt_size < 512) ){
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) if(coal_conf->tx_coal_type != LOW_COALESCE){
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) coal_conf->tx_timeout = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) coal_conf->tx_event_count = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) amd8111e_set_coalesce(dev,TX_INTR_COAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) coal_conf->tx_coal_type = LOW_COALESCE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) else if ((tx_pkt_size >= 512) && (tx_pkt_size < 1024)){
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) if(coal_conf->tx_coal_type != MEDIUM_COALESCE){
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) coal_conf->tx_timeout = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) coal_conf->tx_event_count = 5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) amd8111e_set_coalesce(dev,TX_INTR_COAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) coal_conf->tx_coal_type = MEDIUM_COALESCE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) } else if (tx_pkt_size >= 1024) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) if (coal_conf->tx_coal_type != HIGH_COALESCE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) coal_conf->tx_timeout = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) coal_conf->tx_event_count = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) amd8111e_set_coalesce(dev, TX_INTR_COAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) coal_conf->tx_coal_type = HIGH_COALESCE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) /* This is device interrupt function. It handles transmit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) * receive,link change and hardware timer interrupts.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) static irqreturn_t amd8111e_interrupt(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) struct net_device *dev = (struct net_device *)dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) struct amd8111e_priv *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) void __iomem *mmio = lp->mmio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) unsigned int intr0, intren0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) unsigned int handled = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) if(unlikely(dev == NULL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) return IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) spin_lock(&lp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) /* disabling interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) writel(INTREN, mmio + CMD0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) /* Read interrupt status */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) intr0 = readl(mmio + INT0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) intren0 = readl(mmio + INTEN0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) /* Process all the INT event until INTR bit is clear. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) if (!(intr0 & INTR)){
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) handled = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) goto err_no_interrupt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) /* Current driver processes 4 interrupts : RINT,TINT,LCINT,STINT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) writel(intr0, mmio + INT0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) /* Check if Receive Interrupt has occurred. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) if (intr0 & RINT0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) if (napi_schedule_prep(&lp->napi)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) /* Disable receive interupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) writel(RINTEN0, mmio + INTEN0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) /* Schedule a polling routine */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) __napi_schedule(&lp->napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) } else if (intren0 & RINTEN0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) netdev_dbg(dev, "************Driver bug! interrupt while in poll\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) /* Fix by disable receive interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) writel(RINTEN0, mmio + INTEN0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) /* Check if Transmit Interrupt has occurred. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) if (intr0 & TINT0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) amd8111e_tx(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) /* Check if Link Change Interrupt has occurred. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) if (intr0 & LCINT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) amd8111e_link_change(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) /* Check if Hardware Timer Interrupt has occurred. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) if (intr0 & STINT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) amd8111e_calc_coalesce(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) err_no_interrupt:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) writel( VAL0 | INTREN,mmio + CMD0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) spin_unlock(&lp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) return IRQ_RETVAL(handled);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) #ifdef CONFIG_NET_POLL_CONTROLLER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) static void amd8111e_poll(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) local_irq_save(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) amd8111e_interrupt(0, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) local_irq_restore(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) /* This function closes the network interface and updates
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) * the statistics so that most recent statistics will be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) * available after the interface is down.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) static int amd8111e_close(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) struct amd8111e_priv *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) netif_stop_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) napi_disable(&lp->napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) spin_lock_irq(&lp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) amd8111e_disable_interrupt(lp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) amd8111e_stop_chip(lp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) /* Free transmit and receive skbs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) amd8111e_free_skbs(lp->amd8111e_net_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) netif_carrier_off(lp->amd8111e_net_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) /* Delete ipg timer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) if(lp->options & OPTION_DYN_IPG_ENABLE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) del_timer_sync(&lp->ipg_data.ipg_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) spin_unlock_irq(&lp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) free_irq(dev->irq, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) amd8111e_free_ring(lp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) /* Update the statistics before closing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) amd8111e_get_stats(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) lp->opened = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) /* This function opens new interface.It requests irq for the device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) * initializes the device,buffers and descriptors, and starts the device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) static int amd8111e_open(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) struct amd8111e_priv *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) if(dev->irq ==0 || request_irq(dev->irq, amd8111e_interrupt, IRQF_SHARED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) dev->name, dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) napi_enable(&lp->napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) spin_lock_irq(&lp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) amd8111e_init_hw_default(lp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) if(amd8111e_restart(dev)){
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) spin_unlock_irq(&lp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) napi_disable(&lp->napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) if (dev->irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) free_irq(dev->irq, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) /* Start ipg timer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) if(lp->options & OPTION_DYN_IPG_ENABLE){
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) add_timer(&lp->ipg_data.ipg_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) netdev_info(dev, "Dynamic IPG Enabled\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) lp->opened = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) spin_unlock_irq(&lp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) netif_start_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) /* This function checks if there is any transmit descriptors
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) * available to queue more packet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) static int amd8111e_tx_queue_avail(struct amd8111e_priv *lp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) int tx_index = lp->tx_idx & TX_BUFF_MOD_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) if (lp->tx_skbuff[tx_index])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) /* This function will queue the transmit packets to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) * descriptors and will trigger the send operation. It also
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) * initializes the transmit descriptors with buffer physical address,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) * byte count, ownership to hardware etc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) static netdev_tx_t amd8111e_start_xmit(struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) struct amd8111e_priv *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) int tx_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) spin_lock_irqsave(&lp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) tx_index = lp->tx_idx & TX_RING_DR_MOD_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) lp->tx_ring[tx_index].buff_count = cpu_to_le16(skb->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) lp->tx_skbuff[tx_index] = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) lp->tx_ring[tx_index].tx_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) #if AMD8111E_VLAN_TAG_USED
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) if (skb_vlan_tag_present(skb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) lp->tx_ring[tx_index].tag_ctrl_cmd |=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) cpu_to_le16(TCC_VLAN_INSERT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) lp->tx_ring[tx_index].tag_ctrl_info =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) cpu_to_le16(skb_vlan_tag_get(skb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) lp->tx_dma_addr[tx_index] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) dma_map_single(&lp->pci_dev->dev, skb->data, skb->len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) lp->tx_ring[tx_index].buff_phy_addr =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) cpu_to_le32(lp->tx_dma_addr[tx_index]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) /* Set FCS and LTINT bits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) lp->tx_ring[tx_index].tx_flags |=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) cpu_to_le16(OWN_BIT | STP_BIT | ENP_BIT|ADD_FCS_BIT|LTINT_BIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) lp->tx_idx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) /* Trigger an immediate send poll. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) writel( VAL1 | TDMD0, lp->mmio + CMD0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) writel( VAL2 | RDMD0,lp->mmio + CMD0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) if(amd8111e_tx_queue_avail(lp) < 0){
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) netif_stop_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) spin_unlock_irqrestore(&lp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) return NETDEV_TX_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) /* This function returns all the memory mapped registers of the device. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) static void amd8111e_read_regs(struct amd8111e_priv *lp, u32 *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) void __iomem *mmio = lp->mmio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) /* Read only necessary registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) buf[0] = readl(mmio + XMT_RING_BASE_ADDR0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) buf[1] = readl(mmio + XMT_RING_LEN0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) buf[2] = readl(mmio + RCV_RING_BASE_ADDR0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) buf[3] = readl(mmio + RCV_RING_LEN0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) buf[4] = readl(mmio + CMD0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) buf[5] = readl(mmio + CMD2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) buf[6] = readl(mmio + CMD3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) buf[7] = readl(mmio + CMD7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) buf[8] = readl(mmio + INT0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) buf[9] = readl(mmio + INTEN0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) buf[10] = readl(mmio + LADRF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) buf[11] = readl(mmio + LADRF+4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) buf[12] = readl(mmio + STAT0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) /* This function sets promiscuos mode, all-multi mode or the multicast address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) * list to the device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) static void amd8111e_set_multicast_list(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) struct netdev_hw_addr *ha;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) struct amd8111e_priv *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) u32 mc_filter[2] ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) int bit_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) if(dev->flags & IFF_PROMISC){
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) writel( VAL2 | PROM, lp->mmio + CMD2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) writel( PROM, lp->mmio + CMD2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) if (dev->flags & IFF_ALLMULTI ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) netdev_mc_count(dev) > MAX_FILTER_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) /* get all multicast packet */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) mc_filter[1] = mc_filter[0] = 0xffffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) lp->options |= OPTION_MULTICAST_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) amd8111e_writeq(*(u64 *)mc_filter, lp->mmio + LADRF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) if (netdev_mc_empty(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) /* get only own packets */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) mc_filter[1] = mc_filter[0] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) lp->options &= ~OPTION_MULTICAST_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) amd8111e_writeq(*(u64 *)mc_filter, lp->mmio + LADRF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) /* disable promiscuous mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) writel(PROM, lp->mmio + CMD2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) /* load all the multicast addresses in the logic filter */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) lp->options |= OPTION_MULTICAST_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) mc_filter[1] = mc_filter[0] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) netdev_for_each_mc_addr(ha, dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) bit_num = (ether_crc_le(ETH_ALEN, ha->addr) >> 26) & 0x3f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) mc_filter[bit_num >> 5] |= 1 << (bit_num & 31);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) amd8111e_writeq(*(u64 *)mc_filter, lp->mmio + LADRF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) /* To eliminate PCI posting bug */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) readl(lp->mmio + CMD2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) static void amd8111e_get_drvinfo(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) struct ethtool_drvinfo *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) struct amd8111e_priv *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) struct pci_dev *pci_dev = lp->pci_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) strlcpy(info->driver, MODULE_NAME, sizeof(info->driver));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) snprintf(info->fw_version, sizeof(info->fw_version),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) "%u", chip_version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) strlcpy(info->bus_info, pci_name(pci_dev), sizeof(info->bus_info));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) static int amd8111e_get_regs_len(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) return AMD8111E_REG_DUMP_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) static void amd8111e_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) struct amd8111e_priv *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) regs->version = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) amd8111e_read_regs(lp, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) static int amd8111e_get_link_ksettings(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) struct ethtool_link_ksettings *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) struct amd8111e_priv *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) spin_lock_irq(&lp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) mii_ethtool_get_link_ksettings(&lp->mii_if, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) spin_unlock_irq(&lp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) static int amd8111e_set_link_ksettings(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) const struct ethtool_link_ksettings *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) struct amd8111e_priv *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) int res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) spin_lock_irq(&lp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) res = mii_ethtool_set_link_ksettings(&lp->mii_if, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) spin_unlock_irq(&lp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) return res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) static int amd8111e_nway_reset(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) struct amd8111e_priv *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) return mii_nway_restart(&lp->mii_if);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) static u32 amd8111e_get_link(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) struct amd8111e_priv *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) return mii_link_ok(&lp->mii_if);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) static void amd8111e_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol_info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) struct amd8111e_priv *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) wol_info->supported = WAKE_MAGIC|WAKE_PHY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) if (lp->options & OPTION_WOL_ENABLE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) wol_info->wolopts = WAKE_MAGIC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) static int amd8111e_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol_info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) struct amd8111e_priv *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) if (wol_info->wolopts & ~(WAKE_MAGIC|WAKE_PHY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) spin_lock_irq(&lp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) if (wol_info->wolopts & WAKE_MAGIC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) lp->options |=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) (OPTION_WOL_ENABLE | OPTION_WAKE_MAGIC_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) else if(wol_info->wolopts & WAKE_PHY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) lp->options |=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) (OPTION_WOL_ENABLE | OPTION_WAKE_PHY_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) lp->options &= ~OPTION_WOL_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) spin_unlock_irq(&lp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) static const struct ethtool_ops ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) .get_drvinfo = amd8111e_get_drvinfo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) .get_regs_len = amd8111e_get_regs_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) .get_regs = amd8111e_get_regs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) .nway_reset = amd8111e_nway_reset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) .get_link = amd8111e_get_link,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) .get_wol = amd8111e_get_wol,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) .set_wol = amd8111e_set_wol,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) .get_link_ksettings = amd8111e_get_link_ksettings,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) .set_link_ksettings = amd8111e_set_link_ksettings,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) /* This function handles all the ethtool ioctls. It gives driver info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) * gets/sets driver speed, gets memory mapped register values, forces
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) * auto negotiation, sets/gets WOL options for ethtool application.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) static int amd8111e_ioctl(struct net_device *dev , struct ifreq *ifr, int cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) struct mii_ioctl_data *data = if_mii(ifr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) struct amd8111e_priv *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) u32 mii_regval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) switch(cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) case SIOCGMIIPHY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) data->phy_id = lp->ext_phy_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) case SIOCGMIIREG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) spin_lock_irq(&lp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) err = amd8111e_read_phy(lp, data->phy_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) data->reg_num & PHY_REG_ADDR_MASK, &mii_regval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) spin_unlock_irq(&lp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) data->val_out = mii_regval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) case SIOCSMIIREG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) spin_lock_irq(&lp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) err = amd8111e_write_phy(lp, data->phy_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) data->reg_num & PHY_REG_ADDR_MASK, data->val_in);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) spin_unlock_irq(&lp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) /* do nothing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) static int amd8111e_set_mac_address(struct net_device *dev, void *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) struct amd8111e_priv *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) struct sockaddr *addr = p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) spin_lock_irq(&lp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) /* Setting the MAC address to the device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) for (i = 0; i < ETH_ALEN; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) writeb( dev->dev_addr[i], lp->mmio + PADR + i );
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) spin_unlock_irq(&lp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) /* This function changes the mtu of the device. It restarts the device to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) * initialize the descriptor with new receive buffers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) static int amd8111e_change_mtu(struct net_device *dev, int new_mtu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) struct amd8111e_priv *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) if (!netif_running(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) /* new_mtu will be used
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) * when device starts netxt time
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) dev->mtu = new_mtu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) spin_lock_irq(&lp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) /* stop the chip */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) writel(RUN, lp->mmio + CMD0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) dev->mtu = new_mtu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) err = amd8111e_restart(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) spin_unlock_irq(&lp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) if(!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) netif_start_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) static int amd8111e_enable_magicpkt(struct amd8111e_priv *lp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) writel( VAL1|MPPLBA, lp->mmio + CMD3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) writel( VAL0|MPEN_SW, lp->mmio + CMD7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) /* To eliminate PCI posting bug */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) readl(lp->mmio + CMD7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) static int amd8111e_enable_link_change(struct amd8111e_priv *lp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) /* Adapter is already stoped/suspended/interrupt-disabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) writel(VAL0|LCMODE_SW,lp->mmio + CMD7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) /* To eliminate PCI posting bug */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) readl(lp->mmio + CMD7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) /* This function is called when a packet transmission fails to complete
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) * within a reasonable period, on the assumption that an interrupt have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) * failed or the interface is locked up. This function will reinitialize
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) * the hardware.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) static void amd8111e_tx_timeout(struct net_device *dev, unsigned int txqueue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) struct amd8111e_priv *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) netdev_err(dev, "transmit timed out, resetting\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) spin_lock_irq(&lp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) err = amd8111e_restart(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) spin_unlock_irq(&lp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) if(!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) netif_wake_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) static int __maybe_unused amd8111e_suspend(struct device *dev_d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) struct net_device *dev = dev_get_drvdata(dev_d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) struct amd8111e_priv *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) if (!netif_running(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) /* disable the interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) spin_lock_irq(&lp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) amd8111e_disable_interrupt(lp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) spin_unlock_irq(&lp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) netif_device_detach(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) /* stop chip */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) spin_lock_irq(&lp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) if(lp->options & OPTION_DYN_IPG_ENABLE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) del_timer_sync(&lp->ipg_data.ipg_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) amd8111e_stop_chip(lp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) spin_unlock_irq(&lp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) if(lp->options & OPTION_WOL_ENABLE){
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) /* enable wol */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) if(lp->options & OPTION_WAKE_MAGIC_ENABLE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) amd8111e_enable_magicpkt(lp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) if(lp->options & OPTION_WAKE_PHY_ENABLE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) amd8111e_enable_link_change(lp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) device_set_wakeup_enable(dev_d, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) else{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) device_set_wakeup_enable(dev_d, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) static int __maybe_unused amd8111e_resume(struct device *dev_d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) struct net_device *dev = dev_get_drvdata(dev_d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) struct amd8111e_priv *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) if (!netif_running(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) netif_device_attach(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) spin_lock_irq(&lp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) amd8111e_restart(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) /* Restart ipg timer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) if(lp->options & OPTION_DYN_IPG_ENABLE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) mod_timer(&lp->ipg_data.ipg_timer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) jiffies + IPG_CONVERGE_JIFFIES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) spin_unlock_irq(&lp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) static void amd8111e_config_ipg(struct timer_list *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) struct amd8111e_priv *lp = from_timer(lp, t, ipg_data.ipg_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) struct ipg_info *ipg_data = &lp->ipg_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) void __iomem *mmio = lp->mmio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) unsigned int prev_col_cnt = ipg_data->col_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) unsigned int total_col_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) unsigned int tmp_ipg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) if(lp->link_config.duplex == DUPLEX_FULL){
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) ipg_data->ipg = DEFAULT_IPG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) if(ipg_data->ipg_state == SSTATE){
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) if(ipg_data->timer_tick == IPG_STABLE_TIME){
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) ipg_data->timer_tick = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) ipg_data->ipg = MIN_IPG - IPG_STEP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) ipg_data->current_ipg = MIN_IPG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) ipg_data->diff_col_cnt = 0xFFFFFFFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) ipg_data->ipg_state = CSTATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) ipg_data->timer_tick++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) if(ipg_data->ipg_state == CSTATE){
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) /* Get the current collision count */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) total_col_cnt = ipg_data->col_cnt =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) amd8111e_read_mib(mmio, xmt_collisions);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) if ((total_col_cnt - prev_col_cnt) <
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) (ipg_data->diff_col_cnt)){
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) ipg_data->diff_col_cnt =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) total_col_cnt - prev_col_cnt ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) ipg_data->ipg = ipg_data->current_ipg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) ipg_data->current_ipg += IPG_STEP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) if (ipg_data->current_ipg <= MAX_IPG)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) tmp_ipg = ipg_data->current_ipg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) else{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) tmp_ipg = ipg_data->ipg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) ipg_data->ipg_state = SSTATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) writew((u32)tmp_ipg, mmio + IPG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) writew((u32)(tmp_ipg - IFS1_DELTA), mmio + IFS1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) mod_timer(&lp->ipg_data.ipg_timer, jiffies + IPG_CONVERGE_JIFFIES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) static void amd8111e_probe_ext_phy(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) struct amd8111e_priv *lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) for (i = 0x1e; i >= 0; i--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) u32 id1, id2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) if (amd8111e_read_phy(lp, i, MII_PHYSID1, &id1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) if (amd8111e_read_phy(lp, i, MII_PHYSID2, &id2))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) lp->ext_phy_id = (id1 << 16) | id2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) lp->ext_phy_addr = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) lp->ext_phy_id = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) lp->ext_phy_addr = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) static const struct net_device_ops amd8111e_netdev_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) .ndo_open = amd8111e_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) .ndo_stop = amd8111e_close,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) .ndo_start_xmit = amd8111e_start_xmit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) .ndo_tx_timeout = amd8111e_tx_timeout,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) .ndo_get_stats = amd8111e_get_stats,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) .ndo_set_rx_mode = amd8111e_set_multicast_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) .ndo_validate_addr = eth_validate_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) .ndo_set_mac_address = amd8111e_set_mac_address,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) .ndo_do_ioctl = amd8111e_ioctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) .ndo_change_mtu = amd8111e_change_mtu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) #ifdef CONFIG_NET_POLL_CONTROLLER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) .ndo_poll_controller = amd8111e_poll,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) static int amd8111e_probe_one(struct pci_dev *pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) const struct pci_device_id *ent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) int err, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) unsigned long reg_addr,reg_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) struct amd8111e_priv *lp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) struct net_device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) err = pci_enable_device(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) if(err){
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) dev_err(&pdev->dev, "Cannot enable new PCI device\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) if(!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)){
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) dev_err(&pdev->dev, "Cannot find PCI base address\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) err = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) goto err_disable_pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) err = pci_request_regions(pdev, MODULE_NAME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) if(err){
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) dev_err(&pdev->dev, "Cannot obtain PCI resources\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) goto err_disable_pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) pci_set_master(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) /* Find power-management capability. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) if (!pdev->pm_cap) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) dev_err(&pdev->dev, "No Power Management capability\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) err = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) goto err_free_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) /* Initialize DMA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) dev_err(&pdev->dev, "DMA not supported\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) err = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) goto err_free_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) reg_addr = pci_resource_start(pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) reg_len = pci_resource_len(pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) dev = alloc_etherdev(sizeof(struct amd8111e_priv));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) if (!dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) goto err_free_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) SET_NETDEV_DEV(dev, &pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) #if AMD8111E_VLAN_TAG_USED
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) lp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) lp->pci_dev = pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) lp->amd8111e_net_dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) lp->pm_cap = pdev->pm_cap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) spin_lock_init(&lp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) lp->mmio = devm_ioremap(&pdev->dev, reg_addr, reg_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) if (!lp->mmio) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) dev_err(&pdev->dev, "Cannot map device registers\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) goto err_free_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) /* Initializing MAC address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) for (i = 0; i < ETH_ALEN; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) dev->dev_addr[i] = readb(lp->mmio + PADR + i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) /* Setting user defined parametrs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) lp->ext_phy_option = speed_duplex[card_idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) if(coalesce[card_idx])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) lp->options |= OPTION_INTR_COAL_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) if(dynamic_ipg[card_idx++])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) lp->options |= OPTION_DYN_IPG_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) /* Initialize driver entry points */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) dev->netdev_ops = &amd8111e_netdev_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) dev->ethtool_ops = &ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) dev->irq =pdev->irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) dev->watchdog_timeo = AMD8111E_TX_TIMEOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) dev->min_mtu = AMD8111E_MIN_MTU;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) dev->max_mtu = AMD8111E_MAX_MTU;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) netif_napi_add(dev, &lp->napi, amd8111e_rx_poll, 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) #if AMD8111E_VLAN_TAG_USED
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) /* Probe the external PHY */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) amd8111e_probe_ext_phy(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) /* setting mii default values */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) lp->mii_if.dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) lp->mii_if.mdio_read = amd8111e_mdio_read;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) lp->mii_if.mdio_write = amd8111e_mdio_write;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) lp->mii_if.phy_id = lp->ext_phy_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) /* Set receive buffer length and set jumbo option*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) amd8111e_set_rx_buff_len(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) err = register_netdev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) dev_err(&pdev->dev, "Cannot register net device\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) goto err_free_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) pci_set_drvdata(pdev, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) /* Initialize software ipg timer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) if(lp->options & OPTION_DYN_IPG_ENABLE){
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) timer_setup(&lp->ipg_data.ipg_timer, amd8111e_config_ipg, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) lp->ipg_data.ipg_timer.expires = jiffies +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) IPG_CONVERGE_JIFFIES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) lp->ipg_data.ipg = DEFAULT_IPG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) lp->ipg_data.ipg_state = CSTATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) /* display driver and device information */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) chip_version = (readl(lp->mmio + CHIPID) & 0xf0000000)>>28;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) dev_info(&pdev->dev, "[ Rev %x ] PCI 10/100BaseT Ethernet %pM\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) chip_version, dev->dev_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) if (lp->ext_phy_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) dev_info(&pdev->dev, "Found MII PHY ID 0x%08x at address 0x%02x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) lp->ext_phy_id, lp->ext_phy_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) dev_info(&pdev->dev, "Couldn't detect MII PHY, assuming address 0x01\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) err_free_dev:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) free_netdev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) err_free_reg:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) pci_release_regions(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) err_disable_pdev:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) pci_disable_device(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) static void amd8111e_remove_one(struct pci_dev *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) struct net_device *dev = pci_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) if (dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) unregister_netdev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) free_netdev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) pci_release_regions(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) pci_disable_device(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) static const struct pci_device_id amd8111e_pci_tbl[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) .vendor = PCI_VENDOR_ID_AMD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) .device = PCI_DEVICE_ID_AMD8111E_7462,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) .vendor = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) MODULE_DEVICE_TABLE(pci, amd8111e_pci_tbl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) static SIMPLE_DEV_PM_OPS(amd8111e_pm_ops, amd8111e_suspend, amd8111e_resume);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) static struct pci_driver amd8111e_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) .name = MODULE_NAME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) .id_table = amd8111e_pci_tbl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) .probe = amd8111e_probe_one,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) .remove = amd8111e_remove_one,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) .driver.pm = &amd8111e_pm_ops
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) module_pci_driver(amd8111e_driver);