Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for Orange Pi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0
/* $Id: sungem.c,v 1.44.2.22 2002/03/13 01:18:12 davem Exp $
 * sungem.c: Sun GEM ethernet driver.
 *
 * Copyright (C) 2000, 2001, 2002, 2003 David S. Miller (davem@redhat.com)
 *
 * Support for Apple GMAC and assorted PHYs, WOL, Power Management
 * (C) 2001,2002,2003 Benjamin Herrenschmidt (benh@kernel.crashing.org)
 * (C) 2004,2005 Benjamin Herrenschmidt, IBM Corp.
 *
 * NAPI and NETPOLL support
 * (C) 2004 by Eric Lemoine (eric.lemoine@gmail.com)
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/random.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/gfp.h>

#include <asm/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>
#include <asm/irq.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#ifdef CONFIG_PPC_PMAC
#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/pmac_feature.h>
#endif

#include <linux/sungem_phy.h>
#include "sungem.h"

#define STRIP_FCS

#define DEFAULT_MSG	(NETIF_MSG_DRV		| \
			 NETIF_MSG_PROBE	| \
			 NETIF_MSG_LINK)

#define ADVERTISE_MASK	(SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | \
			 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | \
			 SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full | \
			 SUPPORTED_Pause | SUPPORTED_Autoneg)

#define DRV_NAME	"sungem"
#define DRV_VERSION	"1.0"
#define DRV_AUTHOR	"David S. Miller <davem@redhat.com>"

static char version[] =
        DRV_NAME ".c:v" DRV_VERSION " " DRV_AUTHOR "\n";

MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION("Sun GEM Gbit ethernet driver");
MODULE_LICENSE("GPL");

#define GEM_MODULE_NAME	"gem"

static const struct pci_device_id gem_pci_tbl[] = {
	{ PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_GEM,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },

	/* These models only differ from the original GEM in
	 * that their tx/rx fifos are of a different size and
	 * they only support 10/100 speeds. -DaveM
	 *
	 * Apple's GMAC does support gigabit on machines with
	 * the BCM54xx PHYs. -BenH
	 */
	{ PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_RIO_GEM,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_GMAC,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_GMACP,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_GMAC2,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_K2_GMAC,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_SH_SUNGEM,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_IPID2_GMAC,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{0, }
};

MODULE_DEVICE_TABLE(pci, gem_pci_tbl);

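/* PHY access goes through the MIF (MDIO Interface) frame register.
 * The helpers below build an IEEE 802.3 clause-22 frame by hand:
 * bit 30 is the start-of-frame marker, bits 29:28 the opcode
 * (2 = read, 1 = write), followed by the PHY and register addresses
 * and the turnaround bits.  Completion is signalled by the chip
 * setting MIF_FRAME_TALSB; on timeout the read path returns 0xffff,
 * which is what an idle MDIO bus floats to.
 */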
static u16 __sungem_phy_read(struct gem *gp, int phy_addr, int reg)
{
	u32 cmd;
	int limit = 10000;

	cmd  = (1 << 30);
	cmd |= (2 << 28);
	cmd |= (phy_addr << 23) & MIF_FRAME_PHYAD;
	cmd |= (reg << 18) & MIF_FRAME_REGAD;
	cmd |= (MIF_FRAME_TAMSB);
	writel(cmd, gp->regs + MIF_FRAME);

	while (--limit) {
		cmd = readl(gp->regs + MIF_FRAME);
		if (cmd & MIF_FRAME_TALSB)
			break;

		udelay(10);
	}

	if (!limit)
		cmd = 0xffff;

	return cmd & MIF_FRAME_DATA;
}

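/* The _sungem_phy_* variants take a net_device rather than a gem
 * pointer; they match the MDIO callback signatures the driver
 * registers with the shared sungem_phy/MII helpers (the hookup
 * itself is outside this excerpt).
 */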
static inline int _sungem_phy_read(struct net_device *dev, int mii_id, int reg)
{
	struct gem *gp = netdev_priv(dev);
	return __sungem_phy_read(gp, mii_id, reg);
}

static inline u16 sungem_phy_read(struct gem *gp, int reg)
{
	return __sungem_phy_read(gp, gp->mii_phy_addr, reg);
}

static void __sungem_phy_write(struct gem *gp, int phy_addr, int reg, u16 val)
{
	u32 cmd;
	int limit = 10000;

	cmd  = (1 << 30);
	cmd |= (1 << 28);
	cmd |= (phy_addr << 23) & MIF_FRAME_PHYAD;
	cmd |= (reg << 18) & MIF_FRAME_REGAD;
	cmd |= (MIF_FRAME_TAMSB);
	cmd |= (val & MIF_FRAME_DATA);
	writel(cmd, gp->regs + MIF_FRAME);

	while (limit--) {
		cmd = readl(gp->regs + MIF_FRAME);
		if (cmd & MIF_FRAME_TALSB)
			break;

		udelay(10);
	}
}

static inline void _sungem_phy_write(struct net_device *dev, int mii_id, int reg, int val)
{
	struct gem *gp = netdev_priv(dev);
	__sungem_phy_write(gp, mii_id, reg, val & 0xffff);
}

static inline void sungem_phy_write(struct gem *gp, int reg, u16 val)
{
	__sungem_phy_write(gp, gp->mii_phy_addr, reg, val);
}

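/* GREG_IMASK is a mask register: a 1 bit disables the corresponding
 * interrupt source.  "Enabling" interrupts therefore means writing a
 * mask with only GREG_STAT_TXDONE set, since TX completions are
 * reaped from the NAPI poll loop rather than their own interrupt.
 */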
static inline void gem_enable_ints(struct gem *gp)
{
	/* Enable all interrupts but TXDONE */
	writel(GREG_STAT_TXDONE, gp->regs + GREG_IMASK);
}

static inline void gem_disable_ints(struct gem *gp)
{
	/* Disable all interrupts, including TXDONE */
	writel(GREG_STAT_NAPI | GREG_STAT_TXDONE, gp->regs + GREG_IMASK);
	(void)readl(gp->regs + GREG_IMASK); /* write posting */
}

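/* Turn on the chip's clock.  get/put calls are refcounted through
 * cell_enabled; on PowerMac the first user powers up the GMAC cell
 * via the platform feature call.
 */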
static void gem_get_cell(struct gem *gp)
{
	BUG_ON(gp->cell_enabled < 0);
	gp->cell_enabled++;
#ifdef CONFIG_PPC_PMAC
	if (gp->cell_enabled == 1) {
		mb();
		pmac_call_feature(PMAC_FTR_GMAC_ENABLE, gp->of_node, 0, 1);
		udelay(10);
	}
#endif /* CONFIG_PPC_PMAC */
}

/* Turn off the chip's clock */
static void gem_put_cell(struct gem *gp)
{
	BUG_ON(gp->cell_enabled <= 0);
	gp->cell_enabled--;
#ifdef CONFIG_PPC_PMAC
	if (gp->cell_enabled == 0) {
		mb();
		pmac_call_feature(PMAC_FTR_GMAC_ENABLE, gp->of_node, 0, 0);
		udelay(10);
	}
#endif /* CONFIG_PPC_PMAC */
}

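/* Stop order matters: refresh trans_start first so the TX watchdog
 * does not fire while the queue is frozen, quiesce NAPI, then stop
 * the TX queue.  gem_netif_start() below brings both back.
 */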
static inline void gem_netif_stop(struct gem *gp)
{
	netif_trans_update(gp->dev);	/* prevent tx timeout */
	napi_disable(&gp->napi);
	netif_tx_disable(gp->dev);
}

static inline void gem_netif_start(struct gem *gp)
{
	/* NOTE: unconditional netif_wake_queue is only
	 * appropriate so long as all callers are assured to
	 * have free tx slots.
	 */
	netif_wake_queue(gp->dev);
	napi_enable(&gp->napi);
}

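/* The actual reset needs to sleep, so it is deferred to process
 * context: mark it pending and kick the reset_task worker (the
 * work handler itself is beyond this excerpt).
 */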
static void gem_schedule_reset(struct gem *gp)
{
	gp->reset_task_pending = 1;
	schedule_work(&gp->reset_task);
}

static void gem_handle_mif_event(struct gem *gp, u32 reg_val, u32 changed_bits)
{
	if (netif_msg_intr(gp))
		printk(KERN_DEBUG "%s: mif interrupt\n", gp->dev->name);
}

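/* PCS (Physical Coding Sublayer) interrupts only matter when the
 * chip uses its internal SERDES rather than an external copper PHY.
 * A link-down event with no link timer running makes this return 1,
 * which asks the caller to reset the chip and re-negotiate.
 */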
static int gem_pcs_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
{
	u32 pcs_istat = readl(gp->regs + PCS_ISTAT);
	u32 pcs_miistat;

	if (netif_msg_intr(gp))
		printk(KERN_DEBUG "%s: pcs interrupt, pcs_istat: 0x%x\n",
			gp->dev->name, pcs_istat);

	if (!(pcs_istat & PCS_ISTAT_LSC)) {
		netdev_err(dev, "PCS irq but no link status change???\n");
		return 0;
	}

	/* The link status bit latches on zero, so you must
	 * read it twice in such a case to see a transition
	 * to the link being up.
	 */
	pcs_miistat = readl(gp->regs + PCS_MIISTAT);
	if (!(pcs_miistat & PCS_MIISTAT_LS))
		pcs_miistat |=
			(readl(gp->regs + PCS_MIISTAT) &
			 PCS_MIISTAT_LS);

	if (pcs_miistat & PCS_MIISTAT_ANC) {
		/* The remote-fault indication is only valid
		 * when autoneg has completed.
		 */
		if (pcs_miistat & PCS_MIISTAT_RF)
			netdev_info(dev, "PCS AutoNEG complete, RemoteFault\n");
		else
			netdev_info(dev, "PCS AutoNEG complete\n");
	}

	if (pcs_miistat & PCS_MIISTAT_LS) {
		netdev_info(dev, "PCS link is now up\n");
		netif_carrier_on(gp->dev);
	} else {
		netdev_info(dev, "PCS link is now down\n");
		netif_carrier_off(gp->dev);
		/* If this happens and the link timer is not running,
		 * reset so we re-negotiate.
		 */
		if (!timer_pending(&gp->link_timer))
			return 1;
	}

	return 0;
}

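/* The TX MAC keeps 16-bit event counters in hardware and interrupts
 * when one wraps.  Each such interrupt therefore accounts for 0x10000
 * events, which is why the stats below are bumped by 65536 at a time.
 */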
static int gem_txmac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
{
	u32 txmac_stat = readl(gp->regs + MAC_TXSTAT);

	if (netif_msg_intr(gp))
		printk(KERN_DEBUG "%s: txmac interrupt, txmac_stat: 0x%x\n",
			gp->dev->name, txmac_stat);

	/* Expiration of the defer timer is quite normal,
	 * don't even log the event.
	 */
	if ((txmac_stat & MAC_TXSTAT_DTE) &&
	    !(txmac_stat & ~MAC_TXSTAT_DTE))
		return 0;

	if (txmac_stat & MAC_TXSTAT_URUN) {
		netdev_err(dev, "TX MAC xmit underrun\n");
		dev->stats.tx_fifo_errors++;
	}

	if (txmac_stat & MAC_TXSTAT_MPE) {
		netdev_err(dev, "TX MAC max packet size error\n");
		dev->stats.tx_errors++;
	}

	/* The rest are all cases of one of the 16-bit TX
	 * counters expiring.
	 */
	if (txmac_stat & MAC_TXSTAT_NCE)
		dev->stats.collisions += 0x10000;

	if (txmac_stat & MAC_TXSTAT_ECE) {
		dev->stats.tx_aborted_errors += 0x10000;
		dev->stats.collisions += 0x10000;
	}

	if (txmac_stat & MAC_TXSTAT_LCE) {
		dev->stats.tx_aborted_errors += 0x10000;
		dev->stats.collisions += 0x10000;
	}

	/* We do not keep track of MAC_TXSTAT_FCE and
	 * MAC_TXSTAT_PCE events.
	 */
	return 0;
}

/* When we get a RX fifo overflow, the RX unit in GEM is probably hung
 * so we do the following.
 *
 * If any part of the reset goes wrong, we return 1 and that causes the
 * whole chip to be reset.
 */
static int gem_rxmac_reset(struct gem *gp)
{
	struct net_device *dev = gp->dev;
	int limit, i;
	u64 desc_dma;
	u32 val;

	/* First, reset & disable MAC RX. */
	writel(MAC_RXRST_CMD, gp->regs + MAC_RXRST);
	for (limit = 0; limit < 5000; limit++) {
		if (!(readl(gp->regs + MAC_RXRST) & MAC_RXRST_CMD))
			break;
		udelay(10);
	}
	if (limit == 5000) {
		netdev_err(dev, "RX MAC will not reset, resetting whole chip\n");
		return 1;
	}

	writel(gp->mac_rx_cfg & ~MAC_RXCFG_ENAB,
	       gp->regs + MAC_RXCFG);
	for (limit = 0; limit < 5000; limit++) {
		if (!(readl(gp->regs + MAC_RXCFG) & MAC_RXCFG_ENAB))
			break;
		udelay(10);
	}
	if (limit == 5000) {
		netdev_err(dev, "RX MAC will not disable, resetting whole chip\n");
		return 1;
	}

	/* Second, disable RX DMA. */
	writel(0, gp->regs + RXDMA_CFG);
	for (limit = 0; limit < 5000; limit++) {
		if (!(readl(gp->regs + RXDMA_CFG) & RXDMA_CFG_ENABLE))
			break;
		udelay(10);
	}
	if (limit == 5000) {
		netdev_err(dev, "RX DMA will not disable, resetting whole chip\n");
		return 1;
	}

	mdelay(5);

	/* Execute RX reset command. */
	writel(gp->swrst_base | GREG_SWRST_RXRST,
	       gp->regs + GREG_SWRST);
	for (limit = 0; limit < 5000; limit++) {
		if (!(readl(gp->regs + GREG_SWRST) & GREG_SWRST_RXRST))
			break;
		udelay(10);
	}
	if (limit == 5000) {
		netdev_err(dev, "RX reset command will not execute, resetting whole chip\n");
		return 1;
	}

	/* Refresh the RX ring. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct gem_rxd *rxd = &gp->init_block->rxd[i];

		if (gp->rx_skbs[i] == NULL) {
			netdev_err(dev, "Parts of RX ring empty, resetting whole chip\n");
			return 1;
		}

		rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp));
	}
	gp->rx_new = gp->rx_old = 0;

	/* Now we must reprogram the rest of RX unit. */
	desc_dma = (u64) gp->gblock_dvma;
	desc_dma += (INIT_BLOCK_TX_RING_SIZE * sizeof(struct gem_txd));
	writel(desc_dma >> 32, gp->regs + RXDMA_DBHI);
	writel(desc_dma & 0xffffffff, gp->regs + RXDMA_DBLOW);
	writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK);
	val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) |
	       (ETH_HLEN << 13) | RXDMA_CFG_FTHRESH_128);
	writel(val, gp->regs + RXDMA_CFG);
	if (readl(gp->regs + GREG_BIFCFG) & GREG_BIFCFG_M66EN)
		writel(((5 & RXDMA_BLANK_IPKTS) |
			((8 << 12) & RXDMA_BLANK_ITIME)),
		       gp->regs + RXDMA_BLANK);
	else
		writel(((5 & RXDMA_BLANK_IPKTS) |
			((4 << 12) & RXDMA_BLANK_ITIME)),
		       gp->regs + RXDMA_BLANK);
	val  = (((gp->rx_pause_off / 64) << 0) & RXDMA_PTHRESH_OFF);
	val |= (((gp->rx_pause_on / 64) << 12) & RXDMA_PTHRESH_ON);
	writel(val, gp->regs + RXDMA_PTHRESH);
	val = readl(gp->regs + RXDMA_CFG);
	writel(val | RXDMA_CFG_ENABLE, gp->regs + RXDMA_CFG);
	writel(MAC_RXSTAT_RCV, gp->regs + MAC_RXMASK);
	val = readl(gp->regs + MAC_RXCFG);
	writel(val | MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG);

	return 0;
}

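/* RX MAC interrupt: a FIFO overflow means the RX unit is likely
 * wedged, so try the partial reset above; if that fails the caller
 * resets the whole chip.  The remaining bits are 16-bit counter
 * wraps, handled just like their TX counterparts.
 */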
static int gem_rxmac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
{
	u32 rxmac_stat = readl(gp->regs + MAC_RXSTAT);
	int ret = 0;

	if (netif_msg_intr(gp))
		printk(KERN_DEBUG "%s: rxmac interrupt, rxmac_stat: 0x%x\n",
			gp->dev->name, rxmac_stat);

	if (rxmac_stat & MAC_RXSTAT_OFLW) {
		u32 smac = readl(gp->regs + MAC_SMACHINE);

		netdev_err(dev, "RX MAC fifo overflow smac[%08x]\n", smac);
		dev->stats.rx_over_errors++;
		dev->stats.rx_fifo_errors++;

		ret = gem_rxmac_reset(gp);
	}

	if (rxmac_stat & MAC_RXSTAT_ACE)
		dev->stats.rx_frame_errors += 0x10000;

	if (rxmac_stat & MAC_RXSTAT_CCE)
		dev->stats.rx_crc_errors += 0x10000;

	if (rxmac_stat & MAC_RXSTAT_LCE)
		dev->stats.rx_length_errors += 0x10000;

	/* We do not track MAC_RXSTAT_FCE and MAC_RXSTAT_VCE
	 * events.
	 */
	return ret;
}

static int gem_mac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
{
	u32 mac_cstat = readl(gp->regs + MAC_CSTAT);

	if (netif_msg_intr(gp))
		printk(KERN_DEBUG "%s: mac interrupt, mac_cstat: 0x%x\n",
			gp->dev->name, mac_cstat);

	/* This interrupt is just for pause frame and pause
	 * tracking.  It is useful for diagnostics and debug
	 * but probably by default we will mask these events.
	 */
	if (mac_cstat & MAC_CSTAT_PS)
		gp->pause_entered++;

	if (mac_cstat & MAC_CSTAT_PRCV)
		gp->pause_last_time_recvd = (mac_cstat >> 16);

	return 0;
}

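/* MIF status splits into the latched PHY register value (upper 16
 * bits) and a mask of which bits changed; the event handler above
 * currently only logs the interrupt.
 */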
static int gem_mif_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
{
	u32 mif_status = readl(gp->regs + MIF_STATUS);
	u32 reg_val, changed_bits;

	reg_val = (mif_status & MIF_STATUS_DATA) >> 16;
	changed_bits = (mif_status & MIF_STATUS_STAT);

	gem_handle_mif_event(gp, reg_val, changed_bits);

	return 0;
}

static int gem_pci_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
{
	u32 pci_estat = readl(gp->regs + GREG_PCIESTAT);

	if (gp->pdev->vendor == PCI_VENDOR_ID_SUN &&
	    gp->pdev->device == PCI_DEVICE_ID_SUN_GEM) {
		netdev_err(dev, "PCI error [%04x]", pci_estat);

		if (pci_estat & GREG_PCIESTAT_BADACK)
			pr_cont(" <No ACK64# during ABS64 cycle>");
		if (pci_estat & GREG_PCIESTAT_DTRTO)
			pr_cont(" <Delayed transaction timeout>");
		if (pci_estat & GREG_PCIESTAT_OTHER)
			pr_cont(" <other>");
		pr_cont("\n");
	} else {
		pci_estat |= GREG_PCIESTAT_OTHER;
		netdev_err(dev, "PCI error\n");
	}

	if (pci_estat & GREG_PCIESTAT_OTHER) {
		int pci_errs;

		/* Interrogate PCI config space for the
		 * true cause.
		 */
		pci_errs = pci_status_get_and_clear_errors(gp->pdev);
		netdev_err(dev, "PCI status errors[%04x]\n", pci_errs);
		if (pci_errs & PCI_STATUS_PARITY)
			netdev_err(dev, "PCI parity error detected\n");
		if (pci_errs & PCI_STATUS_SIG_TARGET_ABORT)
			netdev_err(dev, "PCI target abort\n");
		if (pci_errs & PCI_STATUS_REC_TARGET_ABORT)
			netdev_err(dev, "PCI master acks target abort\n");
		if (pci_errs & PCI_STATUS_REC_MASTER_ABORT)
			netdev_err(dev, "PCI master abort\n");
		if (pci_errs & PCI_STATUS_SIG_SYSTEM_ERROR)
			netdev_err(dev, "PCI system error SERR#\n");
		if (pci_errs & PCI_STATUS_DETECTED_PARITY)
			netdev_err(dev, "PCI parity error\n");
	}

	/* For all PCI errors, we should reset the chip. */
	return 1;
}

/* All non-normal interrupt conditions get serviced here.
 * Returns non-zero if we should just exit the interrupt
 * handler right now (ie. if we reset the card which invalidates
 * all of the other original irq status bits).
 */
static int gem_abnormal_irq(struct net_device *dev, struct gem *gp, u32 gem_status)
{
	if (gem_status & GREG_STAT_RXNOBUF) {
		/* Frame arrived, no free RX buffers available. */
		if (netif_msg_rx_err(gp))
			printk(KERN_DEBUG "%s: no buffer for rx frame\n",
				gp->dev->name);
		dev->stats.rx_dropped++;
	}

	if (gem_status & GREG_STAT_RXTAGERR) {
		/* corrupt RX tag framing */
		if (netif_msg_rx_err(gp))
			printk(KERN_DEBUG "%s: corrupt rx tag framing\n",
				gp->dev->name);
		dev->stats.rx_errors++;

		return 1;
	}

	if (gem_status & GREG_STAT_PCS) {
		if (gem_pcs_interrupt(dev, gp, gem_status))
			return 1;
	}

	if (gem_status & GREG_STAT_TXMAC) {
		if (gem_txmac_interrupt(dev, gp, gem_status))
			return 1;
	}

	if (gem_status & GREG_STAT_RXMAC) {
		if (gem_rxmac_interrupt(dev, gp, gem_status))
			return 1;
	}

	if (gem_status & GREG_STAT_MAC) {
		if (gem_mac_interrupt(dev, gp, gem_status))
			return 1;
	}

	if (gem_status & GREG_STAT_MIF) {
		if (gem_mif_interrupt(dev, gp, gem_status))
			return 1;
	}

	if (gem_status & GREG_STAT_PCIERR) {
		if (gem_pci_interrupt(dev, gp, gem_status))
			return 1;
	}

	return 0;
}

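/* Reclaim completed TX descriptors.  The hardware's completion index
 * arrives in the TXNR field of the status word; everything from
 * tx_old up to it is done.  For fragmented skbs we first make sure
 * every fragment has completed, then unmap each descriptor, free the
 * skb, and (with an smp_mb() paired against gem_start_xmit()) wake
 * the queue once enough slots are free again.
 */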
static __inline__ void gem_tx(struct net_device *dev, struct gem *gp, u32 gem_status)
{
	int entry, limit;

	entry = gp->tx_old;
	limit = ((gem_status & GREG_STAT_TXNR) >> GREG_STAT_TXNR_SHIFT);
	while (entry != limit) {
		struct sk_buff *skb;
		struct gem_txd *txd;
		dma_addr_t dma_addr;
		u32 dma_len;
		int frag;

		if (netif_msg_tx_done(gp))
			printk(KERN_DEBUG "%s: tx done, slot %d\n",
				gp->dev->name, entry);
		skb = gp->tx_skbs[entry];
		if (skb_shinfo(skb)->nr_frags) {
			int last = entry + skb_shinfo(skb)->nr_frags;
			int walk = entry;
			int incomplete = 0;

			last &= (TX_RING_SIZE - 1);
			for (;;) {
				walk = NEXT_TX(walk);
				if (walk == limit)
					incomplete = 1;
				if (walk == last)
					break;
			}
			if (incomplete)
				break;
		}
		gp->tx_skbs[entry] = NULL;
		dev->stats.tx_bytes += skb->len;

		for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
			txd = &gp->init_block->txd[entry];

			dma_addr = le64_to_cpu(txd->buffer);
			dma_len = le64_to_cpu(txd->control_word) & TXDCTRL_BUFSZ;

			dma_unmap_page(&gp->pdev->dev, dma_addr, dma_len,
				       DMA_TO_DEVICE);
			entry = NEXT_TX(entry);
		}

		dev->stats.tx_packets++;
		dev_consume_skb_any(skb);
	}
	gp->tx_old = entry;

	/* Need to make the tx_old update visible to gem_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that gem_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_queue_stopped(dev) &&
		     TX_BUFFS_AVAIL(gp) > (MAX_SKB_FRAGS + 1))) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

		__netif_tx_lock(txq, smp_processor_id());
		if (netif_queue_stopped(dev) &&
		    TX_BUFFS_AVAIL(gp) > (MAX_SKB_FRAGS + 1))
			netif_wake_queue(dev);
		__netif_tx_unlock(txq);
	}
}

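/* Return fresh RX buffers to the hardware.  Descriptors are handed
 * back in aligned clusters of four, and RXDMA_KICK is only written
 * with the index of the last complete cluster.  The dma_wmb() orders
 * the buffer address updates (done by the caller) before ownership
 * is handed back via the status words, and the mb() before the kick
 * makes everything visible to the chip.
 */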
static __inline__ void gem_post_rxds(struct gem *gp, int limit)
{
	int cluster_start, curr, count, kick;

	cluster_start = curr = (gp->rx_new & ~(4 - 1));
	count = 0;
	kick = -1;
	dma_wmb();
	while (curr != limit) {
		curr = NEXT_RX(curr);
		if (++count == 4) {
			struct gem_rxd *rxd =
				&gp->init_block->rxd[cluster_start];
			for (;;) {
				rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp));
				rxd++;
				cluster_start = NEXT_RX(cluster_start);
				if (cluster_start == curr)
					break;
			}
			kick = curr;
			count = 0;
		}
	}
	if (kick >= 0) {
		mb();
		writel(kick, gp->regs + RXDMA_KICK);
	}
}

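/* RX buffers are 64-byte aligned: ALIGNED_RX_SKB_ADDR() computes how
 * many bytes to skip to reach the next 64-byte boundary, and
 * gem_alloc_skb() over-allocates by 64 bytes and reserves that offset.
 */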
#define ALIGNED_RX_SKB_ADDR(addr) \
        ((((unsigned long)(addr) + (64UL - 1UL)) & ~(64UL - 1UL)) - (unsigned long)(addr))
static __inline__ struct sk_buff *gem_alloc_skb(struct net_device *dev, int size,
						gfp_t gfp_flags)
{
	struct sk_buff *skb = alloc_skb(size + 64, gfp_flags);

	if (likely(skb)) {
		unsigned long offset = ALIGNED_RX_SKB_ADDR(skb->data);
		skb_reserve(skb, offset);
	}
	return skb;
}

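/* NAPI receive: consume up to @work_to_do completed descriptors.
 * Large frames (over RX_COPY_THRESHOLD) are passed up zero-copy and
 * their ring slot is refilled with a freshly allocated buffer; the
 * small-frame copy path continues past the end of this excerpt.
 */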
static int gem_rx(struct gem *gp, int work_to_do)
{
	struct net_device *dev = gp->dev;
	int entry, drops, work_done = 0;
	u32 done;

	if (netif_msg_rx_status(gp))
		printk(KERN_DEBUG "%s: rx interrupt, done: %d, rx_new: %d\n",
			gp->dev->name, readl(gp->regs + RXDMA_DONE), gp->rx_new);

	entry = gp->rx_new;
	drops = 0;
	done = readl(gp->regs + RXDMA_DONE);
	for (;;) {
		struct gem_rxd *rxd = &gp->init_block->rxd[entry];
		struct sk_buff *skb;
		u64 status = le64_to_cpu(rxd->status_word);
		dma_addr_t dma_addr;
		int len;

		if ((status & RXDCTRL_OWN) != 0)
			break;

		if (work_done >= RX_RING_SIZE || work_done >= work_to_do)
			break;

		/* When writing back RX descriptor, GEM writes status
		 * then buffer address, possibly in separate transactions.
		 * If we don't wait for the chip to write both, we could
		 * post a new buffer to this descriptor then have GEM spam
		 * on the buffer address.  We sync on the RX completion
		 * register to prevent this from happening.
		 */
		if (entry == done) {
			done = readl(gp->regs + RXDMA_DONE);
			if (entry == done)
				break;
		}

		/* We can now account for the work we're about to do */
		work_done++;

		skb = gp->rx_skbs[entry];

		len = (status & RXDCTRL_BUFSZ) >> 16;
		if ((len < ETH_ZLEN) || (status & RXDCTRL_BAD)) {
			dev->stats.rx_errors++;
			if (len < ETH_ZLEN)
				dev->stats.rx_length_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 			if (status & RXDCTRL_BAD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 				dev->stats.rx_crc_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 			/* We'll just return it to GEM. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 		drop_it:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 			dev->stats.rx_dropped++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 			goto next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 		dma_addr = le64_to_cpu(rxd->buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 		if (len > RX_COPY_THRESHOLD) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 			struct sk_buff *new_skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 			new_skb = gem_alloc_skb(dev, RX_BUF_ALLOC_SIZE(gp), GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 			if (new_skb == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 				drops++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 				goto drop_it;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 			dma_unmap_page(&gp->pdev->dev, dma_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 				       RX_BUF_ALLOC_SIZE(gp), DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 			gp->rx_skbs[entry] = new_skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 			skb_put(new_skb, (gp->rx_buf_sz + RX_OFFSET));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 			rxd->buffer = cpu_to_le64(dma_map_page(&gp->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 							       virt_to_page(new_skb->data),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 							       offset_in_page(new_skb->data),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 							       RX_BUF_ALLOC_SIZE(gp),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 							       DMA_FROM_DEVICE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 			skb_reserve(new_skb, RX_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 			/* Trim the original skb for the netif. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 			skb_trim(skb, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 			struct sk_buff *copy_skb = netdev_alloc_skb(dev, len + 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 			if (copy_skb == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 				drops++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 				goto drop_it;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 			skb_reserve(copy_skb, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 			skb_put(copy_skb, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 			dma_sync_single_for_cpu(&gp->pdev->dev, dma_addr, len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 						DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 			skb_copy_from_linear_data(skb, copy_skb->data, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 			dma_sync_single_for_device(&gp->pdev->dev, dma_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 						   len, DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 			/* We'll reuse the original ring buffer. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 			skb = copy_skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 		if (likely(dev->features & NETIF_F_RXCSUM)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 			__sum16 csum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 			csum = (__force __sum16)htons((status & RXDCTRL_TCPCSUM) ^ 0xffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 			skb->csum = csum_unfold(csum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 			skb->ip_summed = CHECKSUM_COMPLETE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 		skb->protocol = eth_type_trans(skb, gp->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 		napi_gro_receive(&gp->napi, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 		dev->stats.rx_packets++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 		dev->stats.rx_bytes += len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 	next:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 		entry = NEXT_RX(entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 	gem_post_rxds(gp, entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 	gp->rx_new = entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 	if (drops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 		netdev_info(gp->dev, "Memory squeeze, deferring packet\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 	return work_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) }
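/* A note on the RX strategy above: frames longer than RX_COPY_THRESHOLD
 * get a freshly allocated replacement buffer swapped into the ring, and
 * the original (already unmapped) skb goes up the stack; short frames
 * are copied into a small new skb so the ring buffer stays mapped and is
 * reused in place.  This "copy-break" split is a common driver pattern;
 * the threshold (defined elsewhere in this driver) trades the cost of a
 * memcpy() against the cost of allocating and DMA-mapping a full-size
 * buffer.
 */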
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) static int gem_poll(struct napi_struct *napi, int budget)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 	struct gem *gp = container_of(napi, struct gem, napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 	struct net_device *dev = gp->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 	int work_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 	work_done = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 		/* Handle anomalies */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 		if (unlikely(gp->status & GREG_STAT_ABNORMAL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 			struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 			int reset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 			/* We run the abnormal interrupt handling code with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 			 * the Tx lock. It only resets the Rx portion of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 			 * chip, but we need to guard it against DMA being
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 			 * restarted by the link poll timer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 			__netif_tx_lock(txq, smp_processor_id());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 			reset = gem_abnormal_irq(dev, gp, gp->status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 			__netif_tx_unlock(txq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 			if (reset) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 				gem_schedule_reset(gp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 				napi_complete(napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 				return work_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 		/* Run TX completion thread */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 		gem_tx(dev, gp, gp->status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 		/* Run the RX thread. We don't use any locking here;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 		 * code that wants to do bad things - like cleaning the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 		 * RX ring - must call napi_disable(), which sleeps
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 		 * (schedule_timeout()) until any poll in progress is done.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 		work_done += gem_rx(gp, budget - work_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 		if (work_done >= budget)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 			return work_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 		gp->status = readl(gp->regs + GREG_STAT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 	} while (gp->status & GREG_STAT_NAPI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 	napi_complete_done(napi, work_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 	gem_enable_ints(gp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 	return work_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) }
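/* gem_poll() above follows the standard NAPI contract: consume at most
 * 'budget' packets, and only call napi_complete_done() and re-enable
 * interrupts when the budget was not exhausted.  A minimal sketch of
 * that contract - do_rx_work() and enable_device_irqs() are placeholder
 * names, not functions in this driver:
 *
 *	work_done = do_rx_work(budget);
 *	if (work_done < budget) {
 *		napi_complete_done(napi, work_done);
 *		enable_device_irqs();
 *	}
 *	return work_done;
 *
 * gem additionally re-reads GREG_STAT and loops while the chip still
 * reports NAPI-relevant events, batching work under load.
 */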
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) static irqreturn_t gem_interrupt(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 	struct net_device *dev = dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 	struct gem *gp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 	if (napi_schedule_prep(&gp->napi)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 		u32 gem_status = readl(gp->regs + GREG_STAT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 		if (unlikely(gem_status == 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 			napi_enable(&gp->napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 			return IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 		if (netif_msg_intr(gp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 			printk(KERN_DEBUG "%s: gem_interrupt() gem_status: 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 			       gp->dev->name, gem_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 		gp->status = gem_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 		gem_disable_ints(gp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 		__napi_schedule(&gp->napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 	/* If polling was disabled at the time we received that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 	 * interrupt, we may return IRQ_HANDLED here while we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 	 * should return IRQ_NONE. No big deal...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 	return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) }
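/* napi_schedule_prep() atomically tests and sets the NAPI "scheduled"
 * state, so if a poll is already pending a second interrupt falls
 * straight through to the return below it.  Only the winning CPU
 * latches GREG_STAT into gp->status, masks the chip's interrupts and
 * queues the poll; gem_poll() unmasks them again once it completes.
 */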
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) #ifdef CONFIG_NET_POLL_CONTROLLER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) static void gem_poll_controller(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 	struct gem *gp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 	disable_irq(gp->pdev->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 	gem_interrupt(gp->pdev->irq, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 	enable_irq(gp->pdev->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) static void gem_tx_timeout(struct net_device *dev, unsigned int txqueue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 	struct gem *gp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 	netdev_err(dev, "transmit timed out, resetting\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 	netdev_err(dev, "TX_STATE[%08x:%08x:%08x]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 		   readl(gp->regs + TXDMA_CFG),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 		   readl(gp->regs + MAC_TXSTAT),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 		   readl(gp->regs + MAC_TXCFG));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 	netdev_err(dev, "RX_STATE[%08x:%08x:%08x]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 		   readl(gp->regs + RXDMA_CFG),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 		   readl(gp->regs + MAC_RXSTAT),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 		   readl(gp->regs + MAC_RXCFG));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 	gem_schedule_reset(gp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) static __inline__ int gem_intme(int entry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 	/* Algorithm: IRQ every 1/2 of descriptors. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 	if (!(entry & ((TX_RING_SIZE>>1)-1)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) }
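/* Worked example for gem_intme(): with TX_RING_SIZE = 128, say (the
 * real value is defined in the driver header), (TX_RING_SIZE >> 1) - 1
 * is 63, so the test fires for entry 0 and entry 64 - i.e. two
 * TX-completion interrupt requests per trip around the ring.
 */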
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) static netdev_tx_t gem_start_xmit(struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 				  struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 	struct gem *gp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 	int entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 	u64 ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 	ctrl = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 		const u64 csum_start_off = skb_checksum_start_offset(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 		const u64 csum_stuff_off = csum_start_off + skb->csum_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 		ctrl = (TXDCTRL_CENAB |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 			(csum_start_off << 15) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 			(csum_stuff_off << 21));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 	if (unlikely(TX_BUFFS_AVAIL(gp) <= (skb_shinfo(skb)->nr_frags + 1))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 		/* This is a hard error, log it. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 		if (!netif_queue_stopped(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 			netif_stop_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 			netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 		return NETDEV_TX_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 	entry = gp->tx_new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 	gp->tx_skbs[entry] = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 	if (skb_shinfo(skb)->nr_frags == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 		struct gem_txd *txd = &gp->init_block->txd[entry];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 		dma_addr_t mapping;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 		u32 len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 		len = skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 		mapping = dma_map_page(&gp->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 				       virt_to_page(skb->data),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 				       offset_in_page(skb->data),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 				       len, DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 		ctrl |= TXDCTRL_SOF | TXDCTRL_EOF | len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 		if (gem_intme(entry))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 			ctrl |= TXDCTRL_INTME;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 		txd->buffer = cpu_to_le64(mapping);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 		dma_wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 		txd->control_word = cpu_to_le64(ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 		entry = NEXT_TX(entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 		struct gem_txd *txd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 		u32 first_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 		u64 intme;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 		dma_addr_t first_mapping;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 		int frag, first_entry = entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 		intme = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 		if (gem_intme(entry))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 			intme |= TXDCTRL_INTME;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 		/* We must give this initial chunk to the device last.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 		 * Otherwise we could race with the device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 		first_len = skb_headlen(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 		first_mapping = dma_map_page(&gp->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 					     virt_to_page(skb->data),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 					     offset_in_page(skb->data),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 					     first_len, DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 		entry = NEXT_TX(entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 		for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 			const skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 			u32 len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 			dma_addr_t mapping;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 			u64 this_ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 			len = skb_frag_size(this_frag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 			mapping = skb_frag_dma_map(&gp->pdev->dev, this_frag,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 						   0, len, DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 			this_ctrl = ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 			if (frag == skb_shinfo(skb)->nr_frags - 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 				this_ctrl |= TXDCTRL_EOF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 			txd = &gp->init_block->txd[entry];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 			txd->buffer = cpu_to_le64(mapping);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 			dma_wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 			txd->control_word = cpu_to_le64(this_ctrl | len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 			if (gem_intme(entry))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 				intme |= TXDCTRL_INTME;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 			entry = NEXT_TX(entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 		txd = &gp->init_block->txd[first_entry];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 		txd->buffer = cpu_to_le64(first_mapping);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 		dma_wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 		txd->control_word =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 			cpu_to_le64(ctrl | TXDCTRL_SOF | intme | first_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 	gp->tx_new = entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 	if (unlikely(TX_BUFFS_AVAIL(gp) <= (MAX_SKB_FRAGS + 1))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 		netif_stop_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 		/* netif_stop_queue() must be done before checking
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 		 * the tx index in TX_BUFFS_AVAIL() below, because
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 		 * in gem_tx(), we update tx_old before checking for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 		 * netif_queue_stopped().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 		smp_mb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 		if (TX_BUFFS_AVAIL(gp) > (MAX_SKB_FRAGS + 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 			netif_wake_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 	if (netif_msg_tx_queued(gp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 		printk(KERN_DEBUG "%s: tx queued, slot %d, skblen %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 		       dev->name, entry, skb->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 	mb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 	writel(gp->tx_new, gp->regs + TXDMA_KICK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 	return NETDEV_TX_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) }
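/* How the checksum-offload bits in gem_start_xmit() pack together: the
 * checksum start offset is shifted to bit 15 and the stuff (insertion)
 * offset to bit 21, next to TXDCTRL_CENAB.  For a typical TCP/IPv4
 * frame with a 14-byte Ethernet header and a 20-byte IP header:
 *
 *	csum_start_off = 14 + 20 = 34	(first byte of the TCP header)
 *	csum_stuff_off = 34 + 16 = 50	(the TCP checksum field)
 *	ctrl = TXDCTRL_CENAB | (34 << 15) | (50 << 21);
 *
 * Both offsets come straight from skb_checksum_start_offset() and
 * skb->csum_offset, so the chip can compute and insert the checksum as
 * it streams the frame out.
 */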
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) static void gem_pcs_reset(struct gem *gp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 	int limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 	u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 	/* Reset PCS unit. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 	val = readl(gp->regs + PCS_MIICTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 	val |= PCS_MIICTRL_RST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 	writel(val, gp->regs + PCS_MIICTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 	limit = 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 	while (readl(gp->regs + PCS_MIICTRL) & PCS_MIICTRL_RST) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 		udelay(100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 		if (limit-- <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 	if (limit < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 		netdev_warn(gp->dev, "PCS reset bit would not clear\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) }
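/* The loop above bounds the wait for the self-clearing reset bit to
 * roughly 32 * 100us = 3.2ms before warning.  This poll-with-timeout
 * idiom - set the bit, re-read until it clears, give up after a fixed
 * budget - recurs for every self-clearing control bit in this driver.
 */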
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) static void gem_pcs_reinit_adv(struct gem *gp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 	u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 	/* Make sure PCS is disabled while changing advertisement
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 	 * configuration.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 	val = readl(gp->regs + PCS_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 	val &= ~(PCS_CFG_ENABLE | PCS_CFG_TO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 	writel(val, gp->regs + PCS_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 	/* Advertise all capabilities except asymmetric
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 	 * pause.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 	val = readl(gp->regs + PCS_MIIADV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 	val |= (PCS_MIIADV_FD | PCS_MIIADV_HD |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 		PCS_MIIADV_SP | PCS_MIIADV_AP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 	writel(val, gp->regs + PCS_MIIADV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 	/* Enable and restart auto-negotiation, disable wrapback/loopback,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 	 * and re-enable PCS.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 	val = readl(gp->regs + PCS_MIICTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 	val |= (PCS_MIICTRL_RAN | PCS_MIICTRL_ANE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 	val &= ~PCS_MIICTRL_WB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 	writel(val, gp->regs + PCS_MIICTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 	val = readl(gp->regs + PCS_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 	val |= PCS_CFG_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 	writel(val, gp->regs + PCS_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 	/* Make sure serialink loopback is off.  The meaning
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 	 * of this bit is logically inverted based upon whether
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 	 * you are in Serialink or SERDES mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 	val = readl(gp->regs + PCS_SCTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 	if (gp->phy_type == phy_serialink)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 		val &= ~PCS_SCTRL_LOOP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 		val |= PCS_SCTRL_LOOP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 	writel(val, gp->regs + PCS_SCTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) #define STOP_TRIES 32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) static void gem_reset(struct gem *gp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 	int limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 	u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 	/* Make sure we won't get any more interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 	writel(0xffffffff, gp->regs + GREG_IMASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 	/* Reset the chip */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 	writel(gp->swrst_base | GREG_SWRST_TXRST | GREG_SWRST_RXRST,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 	       gp->regs + GREG_SWRST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 	limit = STOP_TRIES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 		udelay(20);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 		val = readl(gp->regs + GREG_SWRST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 		if (limit-- <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 	} while (val & (GREG_SWRST_TXRST | GREG_SWRST_RXRST));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 	if (limit < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 		netdev_err(gp->dev, "SW reset is ghetto\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 	if (gp->phy_type == phy_serialink || gp->phy_type == phy_serdes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 		gem_pcs_reinit_adv(gp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) }
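/* Same poll-with-timeout idiom as the PCS reset, with a budget of
 * STOP_TRIES * 20us, i.e. about 640us, for the TX/RX software-reset
 * bits to self-clear.  Interrupts are masked first (GREG_IMASK set to
 * all ones) so the reset cannot race with the interrupt handler.
 */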
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) static void gem_start_dma(struct gem *gp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 	u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 	/* We are ready to rock, turn everything on. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 	val = readl(gp->regs + TXDMA_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 	writel(val | TXDMA_CFG_ENABLE, gp->regs + TXDMA_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 	val = readl(gp->regs + RXDMA_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 	writel(val | RXDMA_CFG_ENABLE, gp->regs + RXDMA_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 	val = readl(gp->regs + MAC_TXCFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 	writel(val | MAC_TXCFG_ENAB, gp->regs + MAC_TXCFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 	val = readl(gp->regs + MAC_RXCFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 	writel(val | MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 	(void) readl(gp->regs + MAC_RXCFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 	udelay(100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 	gem_enable_ints(gp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 	writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) }
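/* The final kick posts RX_RING_SIZE - 4 descriptors rather than the
 * whole ring; presumably the 4-entry gap keeps the chip's fill pointer
 * from ever catching up with the driver's cleanup pointer on a
 * completely full ring.  gem_post_rxds() takes over replenishment once
 * traffic is flowing.
 */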
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) /* DMA won't actually be stopped for roughly another 4 ms, though.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) static void gem_stop_dma(struct gem *gp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 	u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 	/* We are done rocking, turn everything off. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 	val = readl(gp->regs + TXDMA_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 	writel(val & ~TXDMA_CFG_ENABLE, gp->regs + TXDMA_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 	val = readl(gp->regs + RXDMA_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 	writel(val & ~RXDMA_CFG_ENABLE, gp->regs + RXDMA_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 	val = readl(gp->regs + MAC_TXCFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 	writel(val & ~MAC_TXCFG_ENAB, gp->regs + MAC_TXCFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 	val = readl(gp->regs + MAC_RXCFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 	writel(val & ~MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 	(void) readl(gp->regs + MAC_RXCFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 	/* Need to wait a bit ... done by the caller */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) /* XXX: double-check what this function should do when called on a PCS PHY */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) static void gem_begin_auto_negotiation(struct gem *gp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 				       const struct ethtool_link_ksettings *ep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 	u32 advertise, features;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 	int autoneg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 	int speed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 	int duplex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 	u32 advertising;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 	if (ep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 		ethtool_convert_link_mode_to_legacy_u32(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 			&advertising, ep->link_modes.advertising);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 	if (gp->phy_type != phy_mii_mdio0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 	    gp->phy_type != phy_mii_mdio1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 		goto non_mii;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 	/* Setup advertise */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 	if (found_mii_phy(gp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 		features = gp->phy_mii.def->features;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 		features = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 	advertise = features & ADVERTISE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 	if (gp->phy_mii.advertising != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 		advertise &= gp->phy_mii.advertising;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 	autoneg = gp->want_autoneg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 	speed = gp->phy_mii.speed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 	duplex = gp->phy_mii.duplex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 	/* Setup link parameters */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 	if (!ep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 		goto start_aneg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 	if (ep->base.autoneg == AUTONEG_ENABLE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 		advertise = advertising;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 		autoneg = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 		autoneg = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 		speed = ep->base.speed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 		duplex = ep->base.duplex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) start_aneg:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 	/* Sanitize settings based on PHY capabilities */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 	if ((features & SUPPORTED_Autoneg) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 		autoneg = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 	if (speed == SPEED_1000 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 	    !(features & (SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 		speed = SPEED_100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 	if (speed == SPEED_100 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 	    !(features & (SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 		speed = SPEED_10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 	if (duplex == DUPLEX_FULL &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 	    !(features & (SUPPORTED_1000baseT_Full |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 	    		  SUPPORTED_100baseT_Full |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 	    		  SUPPORTED_10baseT_Full)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 		duplex = DUPLEX_HALF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 	if (speed == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 		speed = SPEED_10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 	/* If we are asleep, we don't try to actually setup the PHY, we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 	 * just store the settings
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 	if (!netif_device_present(gp->dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 		gp->phy_mii.autoneg = gp->want_autoneg = autoneg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 		gp->phy_mii.speed = speed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 		gp->phy_mii.duplex = duplex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 	/* Configure PHY & start aneg */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 	gp->want_autoneg = autoneg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 	if (autoneg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 		if (found_mii_phy(gp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 			gp->phy_mii.def->ops->setup_aneg(&gp->phy_mii, advertise);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 		gp->lstate = link_aneg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 		if (found_mii_phy(gp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 			gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, speed, duplex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 		gp->lstate = link_force_ok;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) non_mii:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 	gp->timer_ticks = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 	mod_timer(&gp->link_timer, jiffies + ((12 * HZ) / 10));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) }
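/* Example of the sanitization ladder above: forcing 1000/full on a PHY
 * whose 'features' mask only advertises 100baseT lands at SPEED_100,
 * and would fall further to SPEED_10 if 100baseT were missing too;
 * DUPLEX_FULL survives only if some full-duplex mode is actually
 * supported.  While the device is suspended the sanitized settings are
 * only stored, and the PHY is programmed once the device is back up.
 */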
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) /* A link-up condition has occurred, initialize and enable the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336)  * rest of the chip.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) static int gem_set_link_modes(struct gem *gp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 	struct netdev_queue *txq = netdev_get_tx_queue(gp->dev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 	int full_duplex, speed, pause;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 	u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 	full_duplex = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 	speed = SPEED_10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 	pause = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 	if (found_mii_phy(gp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 		if (gp->phy_mii.def->ops->read_link(&gp->phy_mii))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 			return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 		full_duplex = (gp->phy_mii.duplex == DUPLEX_FULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 		speed = gp->phy_mii.speed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 		pause = gp->phy_mii.pause;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 	} else if (gp->phy_type == phy_serialink ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 	    	   gp->phy_type == phy_serdes) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 		u32 pcs_lpa = readl(gp->regs + PCS_MIILP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 		if ((pcs_lpa & PCS_MIIADV_FD) || gp->phy_type == phy_serdes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 			full_duplex = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 		speed = SPEED_1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 	netif_info(gp, link, gp->dev, "Link is up at %d Mbps, %s-duplex\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 		   speed, (full_duplex ? "full" : "half"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 	/* We take the tx queue lock to avoid collisions between
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 	 * this code, the tx path and the NAPI-driven error path
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 	__netif_tx_lock(txq, smp_processor_id());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 	val = (MAC_TXCFG_EIPG0 | MAC_TXCFG_NGU);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 	if (full_duplex) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 		val |= (MAC_TXCFG_ICS | MAC_TXCFG_ICOLL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 		/* MAC_TXCFG_NBO must be zero. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 	writel(val, gp->regs + MAC_TXCFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 	val = (MAC_XIFCFG_OE | MAC_XIFCFG_LLED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 	if (!full_duplex &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 	    (gp->phy_type == phy_mii_mdio0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 	     gp->phy_type == phy_mii_mdio1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 		val |= MAC_XIFCFG_DISE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 	} else if (full_duplex) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 		val |= MAC_XIFCFG_FLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 	if (speed == SPEED_1000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 		val |= (MAC_XIFCFG_GMII);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 	writel(val, gp->regs + MAC_XIFCFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 	/* If gigabit and half-duplex, enable carrier extension
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 	 * mode.  Else, disable it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 	if (speed == SPEED_1000 && !full_duplex) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 		val = readl(gp->regs + MAC_TXCFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 		writel(val | MAC_TXCFG_TCE, gp->regs + MAC_TXCFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 		val = readl(gp->regs + MAC_RXCFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 		writel(val | MAC_RXCFG_RCE, gp->regs + MAC_RXCFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 		val = readl(gp->regs + MAC_TXCFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 		writel(val & ~MAC_TXCFG_TCE, gp->regs + MAC_TXCFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 		val = readl(gp->regs + MAC_RXCFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 		writel(val & ~MAC_RXCFG_RCE, gp->regs + MAC_RXCFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) 	if (gp->phy_type == phy_serialink ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 	    gp->phy_type == phy_serdes) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 		u32 pcs_lpa = readl(gp->regs + PCS_MIILP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 		if (pcs_lpa & (PCS_MIIADV_SP | PCS_MIIADV_AP))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 			pause = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 	if (!full_duplex)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 		writel(512, gp->regs + MAC_STIME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 		writel(64, gp->regs + MAC_STIME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) 	val = readl(gp->regs + MAC_MCCFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) 	if (pause)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 		val |= (MAC_MCCFG_SPE | MAC_MCCFG_RPE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) 		val &= ~(MAC_MCCFG_SPE | MAC_MCCFG_RPE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 	writel(val, gp->regs + MAC_MCCFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 	gem_start_dma(gp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 	__netif_tx_unlock(txq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 	if (netif_msg_link(gp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 		if (pause) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 			netdev_info(gp->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 				    "Pause is enabled (rxfifo: %d off: %d on: %d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 				    gp->rx_fifo_sz,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 				    gp->rx_pause_off,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) 				    gp->rx_pause_on);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 			netdev_info(gp->dev, "Pause is disabled\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) static int gem_mdio_link_not_up(struct gem *gp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) 	switch (gp->lstate) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 	case link_force_ret:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 		netif_info(gp, link, gp->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 			   "Autoneg failed again, keeping forced mode\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 		gp->phy_mii.def->ops->setup_forced(&gp->phy_mii,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 			gp->last_forced_speed, DUPLEX_HALF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 		gp->timer_ticks = 5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 		gp->lstate = link_force_ok;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) 	case link_aneg:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 		/* We try forced modes after a failed aneg only on PHYs that don't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 		 * have the "magic_aneg" bit set; those that do handle the whole
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 		 * forced-mode fallback internally, so we just restart aneg on them.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 		if (gp->phy_mii.def->magic_aneg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) 			return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 		netif_info(gp, link, gp->dev, "switching to forced 100bt\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 		/* Try forced modes. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) 		gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, SPEED_100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 			DUPLEX_HALF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) 		gp->timer_ticks = 5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) 		gp->lstate = link_force_try;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) 	case link_force_try:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 		/* Downgrade from 100 to 10 Mbps if necessary.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) 		 * If already at 10Mbps, warn user about the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 		 * situation every 10 ticks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) 		if (gp->phy_mii.speed == SPEED_100) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 			gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, SPEED_10,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) 				DUPLEX_HALF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) 			gp->timer_ticks = 5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) 			netif_info(gp, link, gp->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) 				   "switching to forced 10bt\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 		} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) 			return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) }
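/* Fallback state machine implemented above, one step per expiry of the
 * link timer while the link stays down:
 *
 *	link_aneg      --timeout-->  forced 100/HD  (lstate = link_force_try)
 *	link_force_try --timeout-->  forced 10/HD   (still link_force_try)
 *	link_force_ret --timeout-->  keep last forced speed (link_force_ok)
 *
 * Returning 1 tells gem_link_timer() to restart autonegotiation from
 * scratch; returning 0 means a forced mode was programmed and we keep
 * waiting for the link to come up.
 */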
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) static void gem_link_timer(struct timer_list *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 	struct gem *gp = from_timer(gp, t, link_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) 	struct net_device *dev = gp->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 	int restart_aneg = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) 	/* There's no point doing anything if we're going to be reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 	if (gp->reset_task_pending)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) 	if (gp->phy_type == phy_serialink ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 	    gp->phy_type == phy_serdes) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) 		u32 val = readl(gp->regs + PCS_MIISTAT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) 		if (!(val & PCS_MIISTAT_LS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) 			val = readl(gp->regs + PCS_MIISTAT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) 		if ((val & PCS_MIISTAT_LS) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 			if (gp->lstate == link_up)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 				goto restart;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 			gp->lstate = link_up;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) 			netif_carrier_on(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) 			(void)gem_set_link_modes(gp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) 		goto restart;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) 	if (found_mii_phy(gp) && gp->phy_mii.def->ops->poll_link(&gp->phy_mii)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) 		/* Ok, here we got a link. If we had it due to a forced
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) 		 * fallback, and we were configured for autoneg, we do
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) 		 * retry a short autoneg pass. If you know your hub is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) 		 * broken, use ethtool ;)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) 		if (gp->lstate == link_force_try && gp->want_autoneg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) 			gp->lstate = link_force_ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) 			gp->last_forced_speed = gp->phy_mii.speed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) 			gp->timer_ticks = 5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) 			if (netif_msg_link(gp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) 				netdev_info(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) 					    "Got link after fallback, retrying autoneg once...\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) 			gp->phy_mii.def->ops->setup_aneg(&gp->phy_mii, gp->phy_mii.advertising);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) 		} else if (gp->lstate != link_up) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) 			gp->lstate = link_up;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) 			netif_carrier_on(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) 			if (gem_set_link_modes(gp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) 				restart_aneg = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) 		/* If the link was previously up, we restart the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) 		 * whole process
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) 		if (gp->lstate == link_up) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) 			gp->lstate = link_down;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) 			netif_info(gp, link, dev, "Link down\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) 			netif_carrier_off(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) 			gem_schedule_reset(gp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) 			/* The reset task will restart the timer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) 		} else if (++gp->timer_ticks > 10) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) 			if (found_mii_phy(gp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) 				restart_aneg = gem_mdio_link_not_up(gp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) 				restart_aneg = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) 	if (restart_aneg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) 		gem_begin_auto_negotiation(gp, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) restart:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) 	mod_timer(&gp->link_timer, jiffies + ((12 * HZ) / 10));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) 
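/* Unmap every DMA buffer still referenced by an RX or TX descriptor,
 * free the attached skbs, and clear the descriptor words so the rings
 * can be rebuilt from a known-empty state.
 */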
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) static void gem_clean_rings(struct gem *gp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) 	struct gem_init_block *gb = gp->init_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) 	struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) 	dma_addr_t dma_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) 	for (i = 0; i < RX_RING_SIZE; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) 		struct gem_rxd *rxd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) 		rxd = &gb->rxd[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) 		if (gp->rx_skbs[i] != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) 			skb = gp->rx_skbs[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) 			dma_addr = le64_to_cpu(rxd->buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) 			dma_unmap_page(&gp->pdev->dev, dma_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) 				       RX_BUF_ALLOC_SIZE(gp),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) 				       DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) 			dev_kfree_skb_any(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) 			gp->rx_skbs[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) 		rxd->status_word = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) 		dma_wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) 		rxd->buffer = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) 	for (i = 0; i < TX_RING_SIZE; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) 		if (gp->tx_skbs[i] != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) 			struct gem_txd *txd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) 			int frag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) 			skb = gp->tx_skbs[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) 			gp->tx_skbs[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) 
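			/* The head of the skb plus each fragment occupies one
			 * descriptor, nr_frags + 1 in total; unmap each buffer,
			 * recovering its length from the descriptor control word.
			 */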
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) 			for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) 				int ent = i & (TX_RING_SIZE - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) 				txd = &gb->txd[ent];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) 				dma_addr = le64_to_cpu(txd->buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) 				dma_unmap_page(&gp->pdev->dev, dma_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) 					       le64_to_cpu(txd->control_word) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) 					       TXDCTRL_BUFSZ, DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) 				if (frag != skb_shinfo(skb)->nr_frags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) 					i++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) 			dev_kfree_skb_any(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) static void gem_init_rings(struct gem *gp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) 	struct gem_init_block *gb = gp->init_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) 	struct net_device *dev = gp->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) 	dma_addr_t dma_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) 	gp->rx_new = gp->rx_old = gp->tx_new = gp->tx_old = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) 	gem_clean_rings(gp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) 
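	/* Size RX buffers for the MTU plus Ethernet and VLAN headers,
	 * but never below a full VLAN-tagged Ethernet frame.
	 */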
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) 	gp->rx_buf_sz = max(dev->mtu + ETH_HLEN + VLAN_HLEN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) 			    (unsigned)VLAN_ETH_FRAME_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) 	for (i = 0; i < RX_RING_SIZE; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) 		struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) 		struct gem_rxd *rxd = &gb->rxd[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) 		skb = gem_alloc_skb(dev, RX_BUF_ALLOC_SIZE(gp), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) 		if (!skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) 			rxd->buffer = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) 			rxd->status_word = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) 		gp->rx_skbs[i] = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) 		skb_put(skb, (gp->rx_buf_sz + RX_OFFSET));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) 		dma_addr = dma_map_page(&gp->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) 					virt_to_page(skb->data),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) 					offset_in_page(skb->data),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) 					RX_BUF_ALLOC_SIZE(gp),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) 					DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) 		rxd->buffer = cpu_to_le64(dma_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) 		dma_wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) 		rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) 		skb_reserve(skb, RX_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) 	for (i = 0; i < TX_RING_SIZE; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) 		struct gem_txd *txd = &gb->txd[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) 		txd->control_word = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) 		dma_wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) 		txd->buffer = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) 	wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) /* Init PHY interface and start link poll state machine */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) static void gem_init_phy(struct gem *gp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) 	u32 mifcfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) 	/* Revert MIF CFG setting done on stop_phy */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) 	mifcfg = readl(gp->regs + MIF_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) 	mifcfg &= ~MIF_CFG_BBMODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) 	writel(mifcfg, gp->regs + MIF_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) 	if (gp->pdev->vendor == PCI_VENDOR_ID_APPLE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) 		int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) 		/* These delays suck, but the HW seems to love them; I'll
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) 		 * seriously consider breaking some locks here to be able
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) 		 * to schedule instead.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) 		for (i = 0; i < 3; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) #ifdef CONFIG_PPC_PMAC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) 			pmac_call_feature(PMAC_FTR_GMAC_PHY_RESET, gp->of_node, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) 			msleep(20);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) 			/* Some PHYs used by Apple have problems coming back to us,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) 			 * so we do an additional reset here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) 			sungem_phy_write(gp, MII_BMCR, BMCR_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) 			msleep(20);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) 			if (sungem_phy_read(gp, MII_BMCR) != 0xffff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) 			if (i == 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) 				netdev_warn(gp->dev, "GMAC PHY not responding !\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) 	if (gp->pdev->vendor == PCI_VENDOR_ID_SUN &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) 	    gp->pdev->device == PCI_DEVICE_ID_SUN_GEM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) 		u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) 
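		/* Judging by the PCS_DMODE_* names: MDIO PHYs use the
		 * MII/GMII management datapath, serialink uses the serial
		 * PCS with GMII output enabled, and anything else is
		 * treated as external serdes mode.
		 */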
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) 		/* Init datapath mode register. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) 		if (gp->phy_type == phy_mii_mdio0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) 		    gp->phy_type == phy_mii_mdio1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) 			val = PCS_DMODE_MGM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) 		} else if (gp->phy_type == phy_serialink) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) 			val = PCS_DMODE_SM | PCS_DMODE_GMOE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) 			val = PCS_DMODE_ESM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) 		writel(val, gp->regs + PCS_DMODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) 	if (gp->phy_type == phy_mii_mdio0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) 	    gp->phy_type == phy_mii_mdio1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) 		/* Reset and detect MII PHY */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) 		sungem_phy_probe(&gp->phy_mii, gp->mii_phy_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) 		/* Init PHY */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) 		if (gp->phy_mii.def && gp->phy_mii.def->ops->init)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) 			gp->phy_mii.def->ops->init(&gp->phy_mii);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) 		gem_pcs_reset(gp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) 		gem_pcs_reinit_adv(gp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) 	/* Default aneg parameters */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) 	gp->timer_ticks = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) 	gp->lstate = link_down;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) 	netif_carrier_off(gp->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) 	/* Print things out */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) 	if (gp->phy_type == phy_mii_mdio0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) 	    gp->phy_type == phy_mii_mdio1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) 		netdev_info(gp->dev, "Found %s PHY\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) 			    gp->phy_mii.def ? gp->phy_mii.def->name : "no");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) 	gem_begin_auto_negotiation(gp, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) 
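/* Program the DMA engines. The TX and RX descriptor rings live back to
 * back in a single init block, so the RX ring base below is the TX base
 * advanced by INIT_BLOCK_TX_RING_SIZE TX descriptors.
 */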
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) static void gem_init_dma(struct gem *gp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) 	u64 desc_dma = (u64) gp->gblock_dvma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) 	u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) 	val = (TXDMA_CFG_BASE | (0x7ff << 10) | TXDMA_CFG_PMODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) 	writel(val, gp->regs + TXDMA_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) 	writel(desc_dma >> 32, gp->regs + TXDMA_DBHI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) 	writel(desc_dma & 0xffffffff, gp->regs + TXDMA_DBLOW);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) 	desc_dma += (INIT_BLOCK_TX_RING_SIZE * sizeof(struct gem_txd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) 	writel(0, gp->regs + TXDMA_KICK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) 	val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) 	       (ETH_HLEN << 13) | RXDMA_CFG_FTHRESH_128);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) 	writel(val, gp->regs + RXDMA_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) 	writel(desc_dma >> 32, gp->regs + RXDMA_DBHI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) 	writel(desc_dma & 0xffffffff, gp->regs + RXDMA_DBLOW);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) 
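	/* Hand the freshly filled RX ring to the chip. The kick value
	 * appears to be the last usable descriptor index, kept a few
	 * entries short of a full ring -- an inference from the code,
	 * not from chip documentation.
	 */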
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) 	writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) 
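	/* The PAUSE on/off FIFO occupancy thresholds are programmed in
	 * units of 64 bytes, hence the divisions below.
	 */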
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) 	val  = (((gp->rx_pause_off / 64) << 0) & RXDMA_PTHRESH_OFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) 	val |= (((gp->rx_pause_on / 64) << 12) & RXDMA_PTHRESH_ON);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) 	writel(val, gp->regs + RXDMA_PTHRESH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) 
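	/* RX interrupt blanking: interrupt once 5 packets are pending or
	 * the blanking interval expires. The interval presumably counts
	 * bus-clock ticks, which would explain the larger count (8 vs 4)
	 * on a 66 MHz bus (M66EN set).
	 */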
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) 	if (readl(gp->regs + GREG_BIFCFG) & GREG_BIFCFG_M66EN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) 		writel(((5 & RXDMA_BLANK_IPKTS) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) 			((8 << 12) & RXDMA_BLANK_ITIME)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) 		       gp->regs + RXDMA_BLANK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) 		writel(((5 & RXDMA_BLANK_IPKTS) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) 			((4 << 12) & RXDMA_BLANK_ITIME)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) 		       gp->regs + RXDMA_BLANK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) 
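/* Compute the MAC_RXCFG filter bits and load the 256-bit multicast hash
 * filter: all-multicast (or more than 256 groups) sets every hash bit,
 * promiscuous mode bypasses filtering entirely, and otherwise each group
 * address selects one bit via the top 8 bits of its little-endian CRC32.
 */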
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) static u32 gem_setup_multicast(struct gem *gp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) 	u32 rxcfg = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) 	if ((gp->dev->flags & IFF_ALLMULTI) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) 	    (netdev_mc_count(gp->dev) > 256)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) 		for (i = 0; i < 16; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) 			writel(0xffff, gp->regs + MAC_HASH0 + (i << 2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) 		rxcfg |= MAC_RXCFG_HFE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) 	} else if (gp->dev->flags & IFF_PROMISC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) 		rxcfg |= MAC_RXCFG_PROM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) 		u16 hash_table[16];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) 		u32 crc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) 		struct netdev_hw_addr *ha;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) 		int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) 		memset(hash_table, 0, sizeof(hash_table));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) 		netdev_for_each_mc_addr(ha, gp->dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) 			crc = ether_crc_le(6, ha->addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) 			crc >>= 24;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) 			hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) 		for (i = 0; i < 16; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) 			writel(hash_table[i], gp->regs + MAC_HASH0 + (i << 2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) 		rxcfg |= MAC_RXCFG_HFE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) 	return rxcfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) static void gem_init_mac(struct gem *gp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) 	unsigned char *e = &gp->dev->dev_addr[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) 	writel(0x1bf0, gp->regs + MAC_SNDPAUSE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) 	writel(0x00, gp->regs + MAC_IPG0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) 	writel(0x08, gp->regs + MAC_IPG1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) 	writel(0x04, gp->regs + MAC_IPG2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) 	writel(0x40, gp->regs + MAC_STIME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) 	writel(0x40, gp->regs + MAC_MINFSZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) 	/* Ethernet payload + header + FCS + optional VLAN tag. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) 	writel(0x20000000 | (gp->rx_buf_sz + 4), gp->regs + MAC_MAXFSZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) 	writel(0x07, gp->regs + MAC_PASIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) 	writel(0x04, gp->regs + MAC_JAMSIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) 	writel(0x10, gp->regs + MAC_ATTLIM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) 	writel(0x8808, gp->regs + MAC_MCTYPE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) 
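	/* The low 10 bits of the MAC address seed what the register name
	 * suggests is the backoff random number generator.
	 */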
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) 	writel((e[5] | (e[4] << 8)) & 0x3ff, gp->regs + MAC_RANDSEED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) 	writel((e[4] << 8) | e[5], gp->regs + MAC_ADDR0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) 	writel((e[2] << 8) | e[3], gp->regs + MAC_ADDR1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) 	writel((e[0] << 8) | e[1], gp->regs + MAC_ADDR2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) 	writel(0, gp->regs + MAC_ADDR3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) 	writel(0, gp->regs + MAC_ADDR4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) 	writel(0, gp->regs + MAC_ADDR5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) 
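	/* MAC_ADDR6..8 hold the 802.3x flow-control multicast address
	 * 01:80:c2:00:00:01, in the same low-word-first order as the
	 * station address above.
	 */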
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) 	writel(0x0001, gp->regs + MAC_ADDR6);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) 	writel(0xc200, gp->regs + MAC_ADDR7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) 	writel(0x0180, gp->regs + MAC_ADDR8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) 	writel(0, gp->regs + MAC_AFILT0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) 	writel(0, gp->regs + MAC_AFILT1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) 	writel(0, gp->regs + MAC_AFILT2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) 	writel(0, gp->regs + MAC_AF21MSK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) 	writel(0, gp->regs + MAC_AF0MSK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) 	gp->mac_rx_cfg = gem_setup_multicast(gp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) #ifdef STRIP_FCS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) 	gp->mac_rx_cfg |= MAC_RXCFG_SFCS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) 	writel(0, gp->regs + MAC_NCOLL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) 	writel(0, gp->regs + MAC_FASUCC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) 	writel(0, gp->regs + MAC_ECOLL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) 	writel(0, gp->regs + MAC_LCOLL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) 	writel(0, gp->regs + MAC_DTIMER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) 	writel(0, gp->regs + MAC_PATMPS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) 	writel(0, gp->regs + MAC_RFCTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) 	writel(0, gp->regs + MAC_LERR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) 	writel(0, gp->regs + MAC_AERR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) 	writel(0, gp->regs + MAC_FCSERR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) 	writel(0, gp->regs + MAC_RXCVERR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) 	/* Clear RX/TX/MAC/XIF config, we will set these up and enable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) 	 * them once a link is established.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) 	writel(0, gp->regs + MAC_TXCFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) 	writel(gp->mac_rx_cfg, gp->regs + MAC_RXCFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) 	writel(0, gp->regs + MAC_MCCFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) 	writel(0, gp->regs + MAC_XIFCFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) 	/* Setup MAC interrupts.  We want to get all of the interesting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) 	 * counter expiration events, but we do not want to hear about
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) 	 * normal rx/tx as the DMA engine tells us that.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) 	writel(MAC_TXSTAT_XMIT, gp->regs + MAC_TXMASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) 	writel(MAC_RXSTAT_RCV, gp->regs + MAC_RXMASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) 	/* Don't even enable the PAUSE interrupts for now; we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) 	 * make no use of those events other than to record them.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) 	writel(0xffffffff, gp->regs + MAC_MCMASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) 	/* Don't enable GEM's WOL in normal operations */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) 	if (gp->has_wol)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) 		writel(0, gp->regs + WOL_WAKECSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) static void gem_init_pause_thresholds(struct gem *gp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) 	u32 cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) 	/* Calculate pause thresholds.  Setting the OFF threshold to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) 	 * full RX FIFO size effectively disables PAUSE generation, which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) 	 * is what we do for 10/100-only GEMs whose FIFOs are too small
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) 	 * to make real gains from PAUSE.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) 	if (gp->rx_fifo_sz <= (2 * 1024)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) 		gp->rx_pause_off = gp->rx_pause_on = gp->rx_fifo_sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) 	} else {
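		/* Round the largest frame (buffer + FCS) up to a 64-byte
		 * multiple; XOFF once the FIFO is within two such frames
		 * of full, XON again after one more frame has drained.
		 */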
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) 		int max_frame = (gp->rx_buf_sz + 4 + 64) & ~63;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) 		int off = (gp->rx_fifo_sz - (max_frame * 2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) 		int on = off - max_frame;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) 		gp->rx_pause_off = off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) 		gp->rx_pause_on = on;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) 	/* Configure the chip's "burst" DMA mode & enable some
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) 	 * HW bug fixes on the Apple version.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) 	cfg = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) 	if (gp->pdev->vendor == PCI_VENDOR_ID_APPLE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) 		cfg |= GREG_CFG_RONPAULBIT | GREG_CFG_ENBUG2FIX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) #if !defined(CONFIG_SPARC64) && !defined(CONFIG_ALPHA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) 	cfg |= GREG_CFG_IBURST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) 	cfg |= ((31 << 1) & GREG_CFG_TXDMALIM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) 	cfg |= ((31 << 6) & GREG_CFG_RXDMALIM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) 	writel(cfg, gp->regs + GREG_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) 	/* If Infinite Burst didn't stick, then use different
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) 	 * thresholds (and Apple bug fixes don't exist)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) 	if (!(readl(gp->regs + GREG_CFG) & GREG_CFG_IBURST)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) 		cfg = ((2 << 1) & GREG_CFG_TXDMALIM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) 		cfg |= ((8 << 6) & GREG_CFG_RXDMALIM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) 		writel(cfg, gp->regs + GREG_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) static int gem_check_invariants(struct gem *gp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) 	struct pci_dev *pdev = gp->pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) 	u32 mif_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) 	/* On Apple's sungem, we can't rely on registers as the chip
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) 	 * has been powered down by the firmware. The PHY is looked
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) 	 * up later on.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) 	if (pdev->vendor == PCI_VENDOR_ID_APPLE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) 		gp->phy_type = phy_mii_mdio0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) 		gp->tx_fifo_sz = readl(gp->regs + TXDMA_FSZ) * 64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) 		gp->rx_fifo_sz = readl(gp->regs + RXDMA_FSZ) * 64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) 		gp->swrst_base = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) 		mif_cfg = readl(gp->regs + MIF_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) 		mif_cfg &= ~(MIF_CFG_PSELECT|MIF_CFG_POLL|MIF_CFG_BBMODE|MIF_CFG_MDI1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) 		mif_cfg |= MIF_CFG_MDI0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) 		writel(mif_cfg, gp->regs + MIF_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) 		writel(PCS_DMODE_MGM, gp->regs + PCS_DMODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) 		writel(MAC_XIFCFG_OE, gp->regs + MAC_XIFCFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) 		/* We hard-code the PHY address so we can properly bring it out of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) 		 * reset later on; we can't really probe it at this point, but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) 		 * that isn't an issue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) 		if (gp->pdev->device == PCI_DEVICE_ID_APPLE_K2_GMAC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) 			gp->mii_phy_addr = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) 			gp->mii_phy_addr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) 	mif_cfg = readl(gp->regs + MIF_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) 	if (pdev->vendor == PCI_VENDOR_ID_SUN &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) 	    pdev->device == PCI_DEVICE_ID_SUN_RIO_GEM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) 		/* One of the MII PHYs _must_ be present
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) 		 * as this chip has no gigabit PHY.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) 		if ((mif_cfg & (MIF_CFG_MDI0 | MIF_CFG_MDI1)) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) 			pr_err("RIO GEM lacks MII phy, mif_cfg[%08x]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) 			       mif_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) 			return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) 	/* Determine initial PHY interface type guess.  MDIO1 is the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) 	 * external PHY and thus takes precedence over MDIO0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) 	if (mif_cfg & MIF_CFG_MDI1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) 		gp->phy_type = phy_mii_mdio1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) 		mif_cfg |= MIF_CFG_PSELECT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) 		writel(mif_cfg, gp->regs + MIF_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) 	} else if (mif_cfg & MIF_CFG_MDI0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) 		gp->phy_type = phy_mii_mdio0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) 		mif_cfg &= ~MIF_CFG_PSELECT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) 		writel(mif_cfg, gp->regs + MIF_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) #ifdef CONFIG_SPARC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) 		const char *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) 		p = of_get_property(gp->of_node, "shared-pins", NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) 		if (p && !strcmp(p, "serdes"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) 			gp->phy_type = phy_serdes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) 			gp->phy_type = phy_serialink;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) 	if (gp->phy_type == phy_mii_mdio1 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) 	    gp->phy_type == phy_mii_mdio0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) 		int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) 
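		/* Probe all 32 possible MII addresses; an absent PHY reads
		 * back as 0xffff.
		 */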
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) 		for (i = 0; i < 32; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) 			gp->mii_phy_addr = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) 			if (sungem_phy_read(gp, MII_BMCR) != 0xffff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) 		if (i == 32) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) 			if (pdev->device != PCI_DEVICE_ID_SUN_GEM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) 				pr_err("RIO MII phy will not respond\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) 				return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) 			gp->phy_type = phy_serdes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) 	/* Fetch the FIFO configurations now too. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) 	gp->tx_fifo_sz = readl(gp->regs + TXDMA_FSZ) * 64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) 	gp->rx_fifo_sz = readl(gp->regs + RXDMA_FSZ) * 64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) 	if (pdev->vendor == PCI_VENDOR_ID_SUN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) 		if (pdev->device == PCI_DEVICE_ID_SUN_GEM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) 			if (gp->tx_fifo_sz != (9 * 1024) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) 			    gp->rx_fifo_sz != (20 * 1024)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) 				pr_err("GEM has bogus fifo sizes tx(%d) rx(%d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) 				       gp->tx_fifo_sz, gp->rx_fifo_sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) 				return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) 			gp->swrst_base = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) 			if (gp->tx_fifo_sz != (2 * 1024) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) 			    gp->rx_fifo_sz != (2 * 1024)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) 				pr_err("RIO GEM has bogus fifo sizes tx(%d) rx(%d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) 				       gp->tx_fifo_sz, gp->rx_fifo_sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) 				return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) 			gp->swrst_base = (64 / 4) << GREG_SWRST_CACHE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) static void gem_reinit_chip(struct gem *gp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) 	/* Reset the chip */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) 	gem_reset(gp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) 	/* Make sure ints are disabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) 	gem_disable_ints(gp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) 	/* Allocate & setup ring buffers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) 	gem_init_rings(gp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) 	/* Configure pause thresholds */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) 	gem_init_pause_thresholds(gp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) 	/* Init DMA & MAC engines */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) 	gem_init_dma(gp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) 	gem_init_mac(gp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) static void gem_stop_phy(struct gem *gp, int wol)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) 	u32 mifcfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) 	/* Let the chip settle down a bit; it seems that helps
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) 	 * with sleep mode on some models.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) 	msleep(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) 	/* Make sure we aren't polling for PHY status changes. We
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) 	 * don't currently use that feature anyway.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) 	mifcfg = readl(gp->regs + MIF_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) 	mifcfg &= ~MIF_CFG_POLL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) 	writel(mifcfg, gp->regs + MIF_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) 	if (wol && gp->has_wol) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) 		unsigned char *e = &gp->dev->dev_addr[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) 		u32 csr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) 		/* Setup wake-on-lan for MAGIC packet */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) 		writel(MAC_RXCFG_HFE | MAC_RXCFG_SFCS | MAC_RXCFG_ENAB,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) 		       gp->regs + MAC_RXCFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) 		writel((e[4] << 8) | e[5], gp->regs + WOL_MATCH0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) 		writel((e[2] << 8) | e[3], gp->regs + WOL_MATCH1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) 		writel((e[0] << 8) | e[1], gp->regs + WOL_MATCH2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) 		writel(WOL_MCOUNT_N | WOL_MCOUNT_M, gp->regs + WOL_MCOUNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) 		csr = WOL_WAKECSR_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) 		if ((readl(gp->regs + MAC_XIFCFG) & MAC_XIFCFG_GMII) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) 			csr |= WOL_WAKECSR_MII;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) 		writel(csr, gp->regs + WOL_WAKECSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) 		writel(0, gp->regs + MAC_RXCFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) 		(void)readl(gp->regs + MAC_RXCFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) 		/* Machine sleep will die in strange ways if we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) 		 * don't wait a bit here; it looks like the chip takes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) 		 * some time to really shut down.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) 		msleep(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) 	writel(0, gp->regs + MAC_TXCFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) 	writel(0, gp->regs + MAC_XIFCFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) 	writel(0, gp->regs + TXDMA_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) 	writel(0, gp->regs + RXDMA_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) 	if (!wol) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) 		gem_reset(gp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) 		writel(MAC_TXRST_CMD, gp->regs + MAC_TXRST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) 		writel(MAC_RXRST_CMD, gp->regs + MAC_RXRST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) 		if (found_mii_phy(gp) && gp->phy_mii.def->ops->suspend)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) 			gp->phy_mii.def->ops->suspend(&gp->phy_mii);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) 		/* According to Apple, we must set the MDIO pins to this benign
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) 		 * state or we may 1) eat more current, 2) damage some PHYs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) 		writel(mifcfg | MIF_CFG_BBMODE, gp->regs + MIF_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) 		writel(0, gp->regs + MIF_BBCLK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) 		writel(0, gp->regs + MIF_BBDATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) 		writel(0, gp->regs + MIF_BBOENAB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) 		writel(MAC_XIFCFG_GMII | MAC_XIFCFG_LBCK, gp->regs + MAC_XIFCFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) 		(void) readl(gp->regs + MAC_XIFCFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) static int gem_do_start(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) 	struct gem *gp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) 	pci_set_master(gp->pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) 	/* Init & setup chip hardware */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) 	gem_reinit_chip(gp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) 	/* An interrupt might come in handy */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) 	rc = request_irq(gp->pdev->irq, gem_interrupt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) 			 IRQF_SHARED, dev->name, (void *)dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) 	if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) 		netdev_err(dev, "failed to request irq !\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) 		gem_reset(gp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) 		gem_clean_rings(gp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) 		gem_put_cell(gp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) 		return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) 	/* Mark us as attached again if we come from resume(); this has
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) 	 * no effect if we weren't detached, and it needs to be done now.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) 	netif_device_attach(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) 	/* Restart NAPI & queues */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) 	gem_netif_start(gp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) 	/* Detect & init PHY, start autoneg etc... this will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) 	 * eventually result in starting DMA operations when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) 	 * the link is up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) 	gem_init_phy(gp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) static void gem_do_stop(struct net_device *dev, int wol)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) 	struct gem *gp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) 	/* Stop NAPI and stop tx queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) 	gem_netif_stop(gp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) 	/* Make sure ints are disabled. We don't care about
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) 	 * synchronizing as NAPI is disabled, thus a stray
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) 	 * interrupt will do nothing bad (our irq handler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) 	 * just schedules NAPI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) 	gem_disable_ints(gp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) 	/* Stop the link timer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) 	del_timer_sync(&gp->link_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) 	/* We cannot cancel the reset task while holding the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) 	 * rtnl lock, we'd get an A->B / B->A deadlock situation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) 	 * if we did. This is not an issue, however, as the reset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) 	 * task is synchronized vs. us (rtnl_lock) and will do
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) 	 * nothing if the device is down or suspended. We do
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) 	 * still clear reset_task_pending to avoid a spurious
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) 	 * reset later on in case we do resume before it gets
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) 	 * scheduled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) 	gp->reset_task_pending = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) 	/* Stop DMA; skip the full chip reset if we are going to sleep with WOL */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) 	gem_stop_dma(gp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) 	msleep(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) 	if (!wol)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) 		gem_reset(gp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) 	msleep(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) 	/* Get rid of rings */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) 	gem_clean_rings(gp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) 	/* No irq needed anymore */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) 	free_irq(gp->pdev->irq, (void *) dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) 	/* Shut the PHY down eventually and setup WOL */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) 	gem_stop_phy(gp, wol);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) static void gem_reset_task(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) 	struct gem *gp = container_of(work, struct gem, reset_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) 	/* Lock out the network stack (essentially shield ourselves
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) 	 * against a racing open, close, control call, or suspend)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) 	rtnl_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) 	/* Skip the reset task if suspended or closed, or if it's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) 	 * been cancelled by gem_do_stop (see comment there)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) 	if (!netif_device_present(gp->dev) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) 	    !netif_running(gp->dev) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) 	    !gp->reset_task_pending) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) 		rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) 	/* Stop the link timer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) 	del_timer_sync(&gp->link_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) 	/* Stop NAPI and tx */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) 	gem_netif_stop(gp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) 	/* Reset the chip & rings */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) 	gem_reinit_chip(gp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) 	if (gp->lstate == link_up)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) 		gem_set_link_modes(gp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) 	/* Restart NAPI and Tx */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) 	gem_netif_start(gp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) 	/* We are back ! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) 	gp->reset_task_pending = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) 	/* If the link is not up, restart autoneg, else restart the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) 	 * polling timer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) 	if (gp->lstate != link_up)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) 		gem_begin_auto_negotiation(gp, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) 		mod_timer(&gp->link_timer, jiffies + ((12 * HZ) / 10));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) 	rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) static int gem_open(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) 	struct gem *gp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) 	/* We allow open while suspended; we just do nothing, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) 	 * the chip will be initialized in resume()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) 	if (netif_device_present(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) 		/* Enable the cell */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) 		gem_get_cell(gp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) 		/* Make sure PCI access and bus master are enabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) 		rc = pci_enable_device(gp->pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) 		if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) 			netdev_err(dev, "Failed to enable chip on PCI bus !\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) 			/* Put the cell and forget it for now; it will be considered
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) 			 * as still asleep, and a new sleep cycle may bring it back
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) 			gem_put_cell(gp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) 			return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) 		return gem_do_start(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) static int gem_close(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) 	struct gem *gp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) 	if (netif_device_present(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) 		gem_do_stop(dev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) 		/* Make sure bus master is disabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) 		pci_disable_device(gp->pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) 		/* The cell is also not needed if WOL is disabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) 		if (!gp->asleep_wol)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) 			gem_put_cell(gp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) static int __maybe_unused gem_suspend(struct device *dev_d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) 	struct net_device *dev = dev_get_drvdata(dev_d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) 	struct gem *gp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) 	/* Lock the network stack first to avoid racing with open/close,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) 	 * reset task and setting calls
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) 	rtnl_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) 	/* Not running, mark ourselves non-present, no need for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) 	 * a lock here
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) 	if (!netif_running(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) 		netif_device_detach(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) 		rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) 	netdev_info(dev, "suspending, WakeOnLan %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) 		    (gp->wake_on_lan && netif_running(dev)) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) 		    "enabled" : "disabled");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) 	/* Tell the network stack we're gone. gem_do_stop() below will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) 	 * synchronize with TX, stop NAPI etc...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) 	netif_device_detach(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) 	/* Switch off chip, remember WOL setting */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) 	gp->asleep_wol = !!gp->wake_on_lan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) 	gem_do_stop(dev, gp->asleep_wol);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) 	/* Cell not needed either if WOL is disabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) 	if (!gp->asleep_wol)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) 		gem_put_cell(gp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) 	/* Unlock the network stack */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) 	rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) static int __maybe_unused gem_resume(struct device *dev_d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) 	struct net_device *dev = dev_get_drvdata(dev_d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) 	struct gem *gp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) 	/* See locking comment in gem_suspend */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) 	rtnl_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) 	/* Not running, mark ourselves present, no need for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) 	 * a lock here
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) 	if (!netif_running(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) 		netif_device_attach(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) 		rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) 	/* Enable the cell */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) 	gem_get_cell(gp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) 	/* Restart chip. If that fails there isn't much we can do, we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) 	 * leave things stopped.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) 	gem_do_start(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) 	/* If we had WOL enabled, the cell clock was never turned off during
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) 	 * sleep, so we end up being unbalanced. Fix that here
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) 	if (gp->asleep_wol)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) 		gem_put_cell(gp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) 	/* Unlock the network stack */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) 	rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) static struct net_device_stats *gem_get_stats(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) 	struct gem *gp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) 	/* I have seen this being called while a PM transition was in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) 	 * progress, so we shield against that. Let's also not poke at
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) 	 * registers while the reset task is going on.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) 	 * TODO: Move stats collection elsewhere (link timer?) and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) 	 * make this a no-op to avoid all those synchronization issues.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) 	if (!netif_device_present(dev) || !netif_running(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) 		goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) 	/* Better safe than sorry... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) 	if (WARN_ON(!gp->cell_enabled))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) 		goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) 
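	/* The MAC error counters read below are handled as
	 * accumulate-and-clear: each value is added into the net_device
	 * stats and the register is then written back to 0, so the next
	 * pass picks up only the delta. Reading without the clearing
	 * writel() would double count.
	 */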
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) 	dev->stats.rx_crc_errors += readl(gp->regs + MAC_FCSERR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) 	writel(0, gp->regs + MAC_FCSERR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) 	dev->stats.rx_frame_errors += readl(gp->regs + MAC_AERR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) 	writel(0, gp->regs + MAC_AERR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) 	dev->stats.rx_length_errors += readl(gp->regs + MAC_LERR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) 	writel(0, gp->regs + MAC_LERR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) 	dev->stats.tx_aborted_errors += readl(gp->regs + MAC_ECOLL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) 	dev->stats.collisions +=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) 		(readl(gp->regs + MAC_ECOLL) + readl(gp->regs + MAC_LCOLL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) 	writel(0, gp->regs + MAC_ECOLL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) 	writel(0, gp->regs + MAC_LCOLL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427)  bail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) 	return &dev->stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) static int gem_set_mac_address(struct net_device *dev, void *addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) 	struct sockaddr *macaddr = (struct sockaddr *) addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) 	struct gem *gp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) 	unsigned char *e = &dev->dev_addr[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) 	if (!is_valid_ether_addr(macaddr->sa_data))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) 		return -EADDRNOTAVAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) 	memcpy(dev->dev_addr, macaddr->sa_data, dev->addr_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) 	/* We'll just catch it later when the device is brought up or resumed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) 	if (!netif_running(dev) || !netif_device_present(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) 	/* Better safe than sorry... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) 	if (WARN_ON(!gp->cell_enabled))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) 	writel((e[4] << 8) | e[5], gp->regs + MAC_ADDR0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) 	writel((e[2] << 8) | e[3], gp->regs + MAC_ADDR1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) 	writel((e[0] << 8) | e[1], gp->regs + MAC_ADDR2);
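	/* The three writes above pack the 48-bit station address
	 * low-bytes-first into three 16-bit registers. As an illustrative
	 * example (not from the source): 08:00:20:12:34:56 is programmed
	 * as MAC_ADDR0 = 0x3456, MAC_ADDR1 = 0x2012 and MAC_ADDR2 = 0x0800.
	 */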
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) static void gem_set_multicast(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) 	struct gem *gp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) 	u32 rxcfg, rxcfg_new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) 	int limit = 10000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) 	if (!netif_running(dev) || !netif_device_present(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) 	/* Better safe than sorry... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) 	if (gp->reset_task_pending || WARN_ON(!gp->cell_enabled))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) 	rxcfg = readl(gp->regs + MAC_RXCFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) 	rxcfg_new = gem_setup_multicast(gp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) #ifdef STRIP_FCS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) 	rxcfg_new |= MAC_RXCFG_SFCS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) 	gp->mac_rx_cfg = rxcfg_new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) 	writel(rxcfg & ~MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) 	while (readl(gp->regs + MAC_RXCFG) & MAC_RXCFG_ENAB) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) 		if (!limit--)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) 		udelay(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) 	}
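	/* The loop above bounds the wait: at most 10000 polls of
	 * udelay(10), i.e. roughly 100 ms, for the RX MAC to acknowledge
	 * being disabled before its configuration is rewritten. On
	 * timeout the code deliberately proceeds rather than failing
	 * this (void) rx-mode callback.
	 */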
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) 	rxcfg &= ~(MAC_RXCFG_PROM | MAC_RXCFG_HFE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) 	rxcfg |= rxcfg_new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) 	writel(rxcfg, gp->regs + MAC_RXCFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) /* Jumbo-grams don't seem to work :-( */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) #define GEM_MIN_MTU	ETH_MIN_MTU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) #if 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) #define GEM_MAX_MTU	ETH_DATA_LEN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) #define GEM_MAX_MTU	9000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) #endif
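/* With the "#if 1" branch above this yields ETH_MIN_MTU (68) through
 * ETH_DATA_LEN (1500); switching to the "#else" branch would advertise
 * MTUs up to 9000, but it stays disabled because jumbo frames are
 * known not to work on this hardware.
 */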
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) static int gem_change_mtu(struct net_device *dev, int new_mtu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) 	struct gem *gp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) 	dev->mtu = new_mtu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) 	/* We'll just catch it later when the device is brought up or resumed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) 	if (!netif_running(dev) || !netif_device_present(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) 	/* Better safe than sorry... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) 	if (WARN_ON(!gp->cell_enabled))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) 	gem_netif_stop(gp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) 	gem_reinit_chip(gp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) 	if (gp->lstate == link_up)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) 		gem_set_link_modes(gp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) 	gem_netif_start(gp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) static void gem_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) 	struct gem *gp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) 	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) 	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) 	strlcpy(info->bus_info, pci_name(gp->pdev), sizeof(info->bus_info));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) static int gem_get_link_ksettings(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) 				  struct ethtool_link_ksettings *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) 	struct gem *gp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) 	u32 supported, advertising;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) 	if (gp->phy_type == phy_mii_mdio0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) 	    gp->phy_type == phy_mii_mdio1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) 		if (gp->phy_mii.def)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) 			supported = gp->phy_mii.def->features;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) 			supported = (SUPPORTED_10baseT_Half |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) 					  SUPPORTED_10baseT_Full);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) 		/* XXX hardcoded stuff for now */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) 		cmd->base.port = PORT_MII;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) 		cmd->base.phy_address = 0; /* XXX fixed PHYAD */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) 		/* Return current PHY settings */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) 		cmd->base.autoneg = gp->want_autoneg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) 		cmd->base.speed = gp->phy_mii.speed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) 		cmd->base.duplex = gp->phy_mii.duplex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) 		advertising = gp->phy_mii.advertising;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) 		/* If we started with a forced mode, we don't have a default
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) 		 * advertise set; we need to return something sensible so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) 		 * userland can re-enable autoneg properly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) 		if (advertising == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) 			advertising = supported;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) 	} else { // XXX PCS ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) 		supported =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) 			(SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) 			 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) 			 SUPPORTED_Autoneg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) 		advertising = supported;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) 		cmd->base.speed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) 		cmd->base.duplex = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) 		cmd->base.port = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) 		cmd->base.phy_address = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) 		cmd->base.autoneg = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) 		/* serdes usually means a Fibre connector, with mostly fixed settings */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) 		if (gp->phy_type == phy_serdes) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) 			cmd->base.port = PORT_FIBRE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) 			supported = (SUPPORTED_1000baseT_Half |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) 				SUPPORTED_1000baseT_Full |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) 				SUPPORTED_FIBRE | SUPPORTED_Autoneg |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) 				SUPPORTED_Pause | SUPPORTED_Asym_Pause);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) 			advertising = supported;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) 			if (gp->lstate == link_up)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) 				cmd->base.speed = SPEED_1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) 			cmd->base.duplex = DUPLEX_FULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) 			cmd->base.autoneg = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) 						supported);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) 						advertising);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) static int gem_set_link_ksettings(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) 				  const struct ethtool_link_ksettings *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) 	struct gem *gp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) 	u32 speed = cmd->base.speed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) 	u32 advertising;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) 	ethtool_convert_link_mode_to_legacy_u32(&advertising,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) 						cmd->link_modes.advertising);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) 	/* Verify the settings we care about. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) 	if (cmd->base.autoneg != AUTONEG_ENABLE &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) 	    cmd->base.autoneg != AUTONEG_DISABLE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) 	if (cmd->base.autoneg == AUTONEG_ENABLE &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) 	    advertising == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) 	if (cmd->base.autoneg == AUTONEG_DISABLE &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) 	    ((speed != SPEED_1000 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) 	      speed != SPEED_100 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) 	      speed != SPEED_10) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) 	     (cmd->base.duplex != DUPLEX_HALF &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) 	      cmd->base.duplex != DUPLEX_FULL)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) 		return -EINVAL;
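	/* In short: autoneg on requires a non-empty advertising mask, and
	 * autoneg off requires an explicit 10/100/1000 speed plus a
	 * half/full duplex choice; every other combination is rejected
	 * before the hardware is touched.
	 */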
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) 	/* Apply settings and restart link process. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) 	if (netif_device_present(gp->dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) 		del_timer_sync(&gp->link_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) 		gem_begin_auto_negotiation(gp, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) static int gem_nway_reset(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) 	struct gem *gp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) 	if (!gp->want_autoneg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) 	/* Restart the link process */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) 	if (netif_device_present(gp->dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) 		del_timer_sync(&gp->link_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) 		gem_begin_auto_negotiation(gp, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) static u32 gem_get_msglevel(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) 	struct gem *gp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) 	return gp->msg_enable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) static void gem_set_msglevel(struct net_device *dev, u32 value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) 	struct gem *gp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) 	gp->msg_enable = value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) /* Add more when I understand how to program the chip */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) /* like WAKE_UCAST | WAKE_MCAST | WAKE_BCAST */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) #define WOL_SUPPORTED_MASK	(WAKE_MAGIC)
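/* Only magic-packet wake is exposed, so from userspace something like
 * "ethtool -s eth0 wol g" (an illustrative invocation; the interface
 * name is hypothetical) is the one option gem_set_wol() will accept.
 */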
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) static void gem_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) 	struct gem *gp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) 	/* Add more when I understand how to program the chip */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) 	if (gp->has_wol) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) 		wol->supported = WOL_SUPPORTED_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) 		wol->wolopts = gp->wake_on_lan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) 		wol->supported = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) 		wol->wolopts = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) static int gem_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) 	struct gem *gp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) 	if (!gp->has_wol)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) 		return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) 	gp->wake_on_lan = wol->wolopts & WOL_SUPPORTED_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) static const struct ethtool_ops gem_ethtool_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) 	.get_drvinfo		= gem_get_drvinfo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) 	.get_link		= ethtool_op_get_link,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) 	.nway_reset		= gem_nway_reset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) 	.get_msglevel		= gem_get_msglevel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) 	.set_msglevel		= gem_set_msglevel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) 	.get_wol		= gem_get_wol,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) 	.set_wol		= gem_set_wol,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) 	.get_link_ksettings	= gem_get_link_ksettings,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) 	.set_link_ksettings	= gem_set_link_ksettings,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) static int gem_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) 	struct gem *gp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) 	struct mii_ioctl_data *data = if_mii(ifr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) 	int rc = -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) 	/* For SIOCGMIIREG and SIOCSMIIREG the core checks for us that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) 	 * netif_device_present() is true and holds rtnl_lock for us
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) 	 * so we have nothing to worry about
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) 	switch (cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) 	case SIOCGMIIPHY:		/* Get address of MII PHY in use. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) 		data->phy_id = gp->mii_phy_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) 		fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) 
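	/* Deliberate fallthrough: as with the generic MII ioctl helpers,
	 * SIOCGMIIPHY both reports the PHY address and returns the value
	 * of the requested register in a single call.
	 */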
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) 	case SIOCGMIIREG:		/* Read MII PHY register. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) 		data->val_out = __sungem_phy_read(gp, data->phy_id & 0x1f,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) 					   data->reg_num & 0x1f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) 		rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) 	case SIOCSMIIREG:		/* Write MII PHY register. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) 		__sungem_phy_write(gp, data->phy_id & 0x1f, data->reg_num & 0x1f,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) 			    data->val_in);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) 		rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) #if (!defined(CONFIG_SPARC) && !defined(CONFIG_PPC_PMAC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) /* Fetch MAC address from vital product data of PCI ROM. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) static int find_eth_addr_in_vpd(void __iomem *rom_base, int len, unsigned char *dev_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) 	int this_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) 	for (this_offset = 0x20; this_offset < len; this_offset++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) 		void __iomem *p = rom_base + this_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) 		int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) 
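		/* Byte-match what appears to be a VPD network-address
		 * entry: the 0x90 VPD-R resource tag, what is presumably
		 * a 9-byte payload (keyword + length + data), then the
		 * keyword "NA" (0x4e 0x41) with length 0x06, i.e. exactly
		 * six MAC bytes following.
		 */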
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) 		if (readb(p + 0) != 0x90 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) 		    readb(p + 1) != 0x00 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) 		    readb(p + 2) != 0x09 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) 		    readb(p + 3) != 0x4e ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) 		    readb(p + 4) != 0x41 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) 		    readb(p + 5) != 0x06)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) 		this_offset += 6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) 		p += 6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) 		for (i = 0; i < 6; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) 			dev_addr[i] = readb(p + i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) static void get_gem_mac_nonobp(struct pci_dev *pdev, unsigned char *dev_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) 	size_t size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) 	void __iomem *p = pci_map_rom(pdev, &size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) 	if (p) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) 		int found;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) 
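		/* 0x55 0xaa is the standard PCI expansion ROM signature;
		 * only a ROM starting with it is scanned, and the scan
		 * assumes a 64 KiB window regardless of the size actually
		 * reported by pci_map_rom().
		 */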
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) 		found = readb(p) == 0x55 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) 			readb(p + 1) == 0xaa &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) 			find_eth_addr_in_vpd(p, (64 * 1024), dev_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) 		pci_unmap_rom(pdev, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) 		if (found)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) 	/* Sun MAC prefix then 3 random bytes. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) 	dev_addr[0] = 0x08;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) 	dev_addr[1] = 0x00;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) 	dev_addr[2] = 0x20;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) 	get_random_bytes(dev_addr + 3, 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) #endif /* not Sparc and not PPC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) static int gem_get_device_address(struct gem *gp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) #if defined(CONFIG_SPARC) || defined(CONFIG_PPC_PMAC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) 	struct net_device *dev = gp->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) 	const unsigned char *addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) 	addr = of_get_property(gp->of_node, "local-mac-address", NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) 	if (addr == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) #ifdef CONFIG_SPARC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) 		addr = idprom->id_ethaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) 		printk("\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) 		pr_err("%s: can't get mac-address\n", dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) 		return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) 	memcpy(dev->dev_addr, addr, ETH_ALEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) 	get_gem_mac_nonobp(gp->pdev, gp->dev->dev_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) static void gem_remove_one(struct pci_dev *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) 	struct net_device *dev = pci_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) 	if (dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) 		struct gem *gp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) 		unregister_netdev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) 		/* Ensure reset task is truly gone */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) 		cancel_work_sync(&gp->reset_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) 		/* Free resources */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) 		dma_free_coherent(&pdev->dev, sizeof(struct gem_init_block),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) 				  gp->init_block, gp->gblock_dvma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) 		iounmap(gp->regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) 		pci_release_regions(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) 		free_netdev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) static const struct net_device_ops gem_netdev_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) 	.ndo_open		= gem_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) 	.ndo_stop		= gem_close,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) 	.ndo_start_xmit		= gem_start_xmit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) 	.ndo_get_stats		= gem_get_stats,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) 	.ndo_set_rx_mode	= gem_set_multicast,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) 	.ndo_do_ioctl		= gem_ioctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) 	.ndo_tx_timeout		= gem_tx_timeout,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) 	.ndo_change_mtu		= gem_change_mtu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) 	.ndo_validate_addr	= eth_validate_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) 	.ndo_set_mac_address    = gem_set_mac_address,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) #ifdef CONFIG_NET_POLL_CONTROLLER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) 	.ndo_poll_controller    = gem_poll_controller,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) static int gem_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) 	unsigned long gemreg_base, gemreg_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) 	struct net_device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) 	struct gem *gp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) 	int err, pci_using_dac;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) 	printk_once(KERN_INFO "%s", version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) 	/* Apple gmac note: during probe, the chip is powered up by the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) 	 * arch code to allow the code below to work (and to let the chip
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) 	 * be probed via the config space). It won't stay powered up once
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) 	 * probing is done, however, until the interface is brought up, so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) 	 * we can't rely on register configuration done at this point.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) 	err = pci_enable_device(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) 		pr_err("Cannot enable MMIO operation, aborting\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) 	pci_set_master(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) 	/* Configure DMA attributes. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) 	/* All of the GEM documentation states that 64-bit DMA addressing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) 	 * is fully supported and should work just fine.  However the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) 	 * front end for RIO based GEMs is different and only supports
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) 	 * 32-bit addressing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) 	 * For now we assume the various PPC GEMs are 32-bit only as well.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) 	if (pdev->vendor == PCI_VENDOR_ID_SUN &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) 	    pdev->device == PCI_DEVICE_ID_SUN_GEM &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) 	    !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) 		pci_using_dac = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) 		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) 		if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) 			pr_err("No usable DMA configuration, aborting\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) 			goto err_disable_device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) 		pci_using_dac = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) 	}
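	/* pci_using_dac records whether the 64-bit mask was accepted; its
	 * only consumer is further down, where it enables NETIF_F_HIGHDMA
	 * so the stack may hand us buffers above the 4 GB boundary.
	 */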
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) 	gemreg_base = pci_resource_start(pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) 	gemreg_len = pci_resource_len(pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) 	if ((pci_resource_flags(pdev, 0) & IORESOURCE_IO) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) 		pr_err("Cannot find proper PCI device base address, aborting\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) 		err = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) 		goto err_disable_device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) 	}
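	/* BAR 0 must be a memory BAR: every register access in this
	 * driver is a readl()/writel() on the ioremap()ed window set up
	 * below, so an I/O-port-only device cannot be supported.
	 */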
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) 	dev = alloc_etherdev(sizeof(*gp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) 	if (!dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) 		err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) 		goto err_disable_device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) 	SET_NETDEV_DEV(dev, &pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) 	gp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) 	err = pci_request_regions(pdev, DRV_NAME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) 		pr_err("Cannot obtain PCI resources, aborting\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) 		goto err_out_free_netdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912) 	gp->pdev = pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) 	gp->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) 	gp->msg_enable = DEFAULT_MSG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) 	timer_setup(&gp->link_timer, gem_link_timer, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) 	INIT_WORK(&gp->reset_task, gem_reset_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) 	gp->lstate = link_down;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922) 	gp->timer_ticks = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) 	netif_carrier_off(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) 	gp->regs = ioremap(gemreg_base, gemreg_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) 	if (!gp->regs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927) 		pr_err("Cannot map device registers, aborting\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928) 		err = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929) 		goto err_out_free_res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) 	/* On Apple, we want a reference to the Open Firmware device-tree
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) 	 * node. We use it for clock control.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) #if defined(CONFIG_PPC_PMAC) || defined(CONFIG_SPARC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) 	gp->of_node = pci_device_to_OF_node(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) 	/* Only the Apple version supports WOL, as far as we know */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) 	if (pdev->vendor == PCI_VENDOR_ID_APPLE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) 		gp->has_wol = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) 	/* Make sure cell is enabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) 	gem_get_cell(gp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946) 	/* Make sure everything is stopped and in init state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947) 	gem_reset(gp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) 	/* Fill up the mii_phy structure (even if we won't use it) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950) 	gp->phy_mii.dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) 	gp->phy_mii.mdio_read = _sungem_phy_read;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) 	gp->phy_mii.mdio_write = _sungem_phy_write;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953) #ifdef CONFIG_PPC_PMAC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) 	gp->phy_mii.platform_data = gp->of_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) 	/* By default, we start with autoneg */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) 	gp->want_autoneg = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) 	/* Check fifo sizes, PHY type, etc... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) 	if (gem_check_invariants(gp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) 		err = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962) 		goto err_out_iounmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) 	/* It is guaranteed that the returned buffer will be at least
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) 	 * PAGE_SIZE aligned.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) 	gp->init_block = dma_alloc_coherent(&pdev->dev, sizeof(struct gem_init_block),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) 					    &gp->gblock_dvma, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) 	if (!gp->init_block) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971) 		pr_err("Cannot allocate init block, aborting\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) 		err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) 		goto err_out_iounmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) 	err = gem_get_device_address(gp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) 		goto err_out_free_consistent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980) 	dev->netdev_ops = &gem_netdev_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981) 	netif_napi_add(dev, &gp->napi, gem_poll, 64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982) 	dev->ethtool_ops = &gem_ethtool_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) 	dev->watchdog_timeo = 5 * HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) 	dev->dma = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) 	/* Set that now, in case PM kicks in now */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987) 	pci_set_drvdata(pdev, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989) 	/* We can do scatter/gather and HW checksum */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990) 	dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991) 	dev->features = dev->hw_features;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992) 	if (pci_using_dac)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993) 		dev->features |= NETIF_F_HIGHDMA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) 	/* MTU range: 68 - 1500 (Jumbo mode is broken) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996) 	dev->min_mtu = GEM_MIN_MTU;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997) 	dev->max_mtu = GEM_MAX_MTU;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999) 	/* Register with kernel */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000) 	if (register_netdev(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001) 		pr_err("Cannot register net device, aborting\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002) 		err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003) 		goto err_out_free_consistent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006) 	/* Undo the get_cell with appropriate locking (we could use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007) 	 * ndo_init/uninit but that would be even more clumsy imho)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009) 	rtnl_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010) 	gem_put_cell(gp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011) 	rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013) 	netdev_info(dev, "Sun GEM (PCI) 10/100/1000BaseT Ethernet %pM\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014) 		    dev->dev_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017) err_out_free_consistent:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) 	gem_remove_one(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019) err_out_iounmap:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020) 	gem_put_cell(gp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021) 	iounmap(gp->regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023) err_out_free_res:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024) 	pci_release_regions(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026) err_out_free_netdev:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027) 	free_netdev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028) err_disable_device:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029) 	pci_disable_device(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034) static SIMPLE_DEV_PM_OPS(gem_pm_ops, gem_suspend, gem_resume);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036) static struct pci_driver gem_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037) 	.name		= GEM_MODULE_NAME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038) 	.id_table	= gem_pci_tbl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039) 	.probe		= gem_init_one,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040) 	.remove		= gem_remove_one,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041) 	.driver.pm	= &gem_pm_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044) module_pci_driver(gem_driver);