Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    1) // SPDX-License-Identifier: GPL-2.0-or-later
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    2) /*  D-Link DL2000-based Gigabit Ethernet Adapter Linux driver */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    3) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    4)     Copyright (c) 2001, 2002 by D-Link Corporation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    5)     Written by Edward Peng.<edward_peng@dlink.com.tw>
    Created 03-May-2001, based on Linux' sundance.c.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    7) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    8) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    9) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   10) #include "dl2k.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   11) #include <linux/dma-mapping.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   12) 
/* Register accessors: each expects a local 'void __iomem *ioaddr' in scope. */
#define dw32(reg, val)	iowrite32(val, ioaddr + (reg))
#define dw16(reg, val)	iowrite16(val, ioaddr + (reg))
#define dw8(reg, val)	iowrite8(val, ioaddr + (reg))
#define dr32(reg)	ioread32(ioaddr + (reg))
#define dr16(reg)	ioread16(ioaddr + (reg))
#define dr8(reg)	ioread8(ioaddr + (reg))

/* Upper bound on adapters configurable through the per-unit parameter arrays. */
#define MAX_UNITS 8
static int mtu[MAX_UNITS];	/* per-card MTU override (used only when < PACKET_SIZE) */
static int vlan[MAX_UNITS];	/* per-card VLAN id (valid 1..4095, 0 = off) */
static int jumbo[MAX_UNITS];	/* per-card jumbo-frame enable flag */
static char *media[MAX_UNITS];	/* per-card forced media string, NULL = autoneg */
static int tx_flow=-1;		/* Tx flow control; only an explicit 0 disables it */
static int rx_flow=-1;		/* Rx flow control; only an explicit 0 disables it */
static int copy_thresh;		/* copy-break threshold in bytes (0 = never copy) */
static int rx_coalesce=10;	/* Rx frame count each interrupt */
static int rx_timeout=200;	/* Rx DMA wait time in 640ns increments */
static int tx_coalesce=16;	/* HW xmit count each TxDMAComplete */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   31) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   32) 
MODULE_AUTHOR ("Edward Peng");
MODULE_DESCRIPTION ("D-Link DL2000-based Gigabit Ethernet Adapter");
MODULE_LICENSE("GPL");
module_param_array(mtu, int, NULL, 0);
module_param_array(media, charp, NULL, 0);
module_param_array(vlan, int, NULL, 0);
module_param_array(jumbo, int, NULL, 0);
module_param(tx_flow, int, 0);
module_param(rx_flow, int, 0);
module_param(copy_thresh, int, 0);
module_param(rx_coalesce, int, 0);	/* Rx frame count each interrupt */
module_param(rx_timeout, int, 0);	/* Rx DMA wait time in 640ns increments */
module_param(tx_coalesce, int, 0); /* HW xmit count each TxDMAComplete */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   46) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   47) 
/* Enable the default interrupts: Rx/Tx DMA completion, statistics updates,
 * link-state changes, software-requested interrupts and host errors. */
#define DEFAULT_INTR (RxDMAComplete | HostError | IntRequested | TxDMAComplete| \
       UpdateStats | LinkEvent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   51) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   52) static void dl2k_enable_int(struct netdev_private *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   53) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   54) 	void __iomem *ioaddr = np->ioaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   55) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   56) 	dw16(IntEnable, DEFAULT_INTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   57) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   58) 
/* NOTE(review): both constants are consumed outside this chunk; the names
 * suggest a bound on interrupt-handler re-service iterations and a limit on
 * hardware multicast filter entries — confirm at the use sites. */
static const int max_intrloop = 50;
static const int multicast_filter_limit = 0x40;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   61) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   62) static int rio_open (struct net_device *dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   63) static void rio_timer (struct timer_list *t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   64) static void rio_tx_timeout (struct net_device *dev, unsigned int txqueue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   65) static netdev_tx_t start_xmit (struct sk_buff *skb, struct net_device *dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   66) static irqreturn_t rio_interrupt (int irq, void *dev_instance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   67) static void rio_free_tx (struct net_device *dev, int irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   68) static void tx_error (struct net_device *dev, int tx_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   69) static int receive_packet (struct net_device *dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   70) static void rio_error (struct net_device *dev, int int_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   71) static void set_multicast (struct net_device *dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   72) static struct net_device_stats *get_stats (struct net_device *dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   73) static int clear_stats (struct net_device *dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   74) static int rio_ioctl (struct net_device *dev, struct ifreq *rq, int cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   75) static int rio_close (struct net_device *dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   76) static int find_miiphy (struct net_device *dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   77) static int parse_eeprom (struct net_device *dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   78) static int read_eeprom (struct netdev_private *, int eep_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   79) static int mii_wait_link (struct net_device *dev, int wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   80) static int mii_set_media (struct net_device *dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   81) static int mii_get_media (struct net_device *dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   82) static int mii_set_media_pcs (struct net_device *dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   83) static int mii_get_media_pcs (struct net_device *dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   84) static int mii_read (struct net_device *dev, int phy_addr, int reg_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   85) static int mii_write (struct net_device *dev, int phy_addr, int reg_num,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   86) 		      u16 data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   87) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   88) static const struct ethtool_ops ethtool_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   89) 
/* net_device callback table installed on each adapter by rio_probe1(). */
static const struct net_device_ops netdev_ops = {
	.ndo_open		= rio_open,
	.ndo_start_xmit	= start_xmit,
	.ndo_stop		= rio_close,
	.ndo_get_stats		= get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address 	= eth_mac_addr,
	.ndo_set_rx_mode	= set_multicast,
	.ndo_do_ioctl		= rio_ioctl,
	.ndo_tx_timeout		= rio_tx_timeout,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  101) 
/*
 * Probe one DL2000/IP1000A PCI device: map its register BARs, apply the
 * per-card module parameters, allocate the Tx/Rx descriptor rings, read the
 * EEPROM and register the net_device.
 *
 * Returns 0 on success or a negative errno.  On failure, everything acquired
 * so far is released through the goto ladder at the bottom; the labels must
 * stay in reverse acquisition order.
 */
static int
rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct netdev_private *np;
	static int card_idx;	/* index into the per-unit parameter arrays */
	int chip_idx = ent->driver_data;
	int err, irq;
	void __iomem *ioaddr;
	void *ring_space;
	dma_addr_t ring_dma;

	err = pci_enable_device (pdev);
	if (err)
		return err;

	irq = pdev->irq;
	err = pci_request_regions (pdev, "dl2k");
	if (err)
		goto err_out_disable;

	pci_set_master (pdev);

	/* Default error code for all allocation/mapping failures below. */
	err = -ENOMEM;

	dev = alloc_etherdev (sizeof (*np));
	if (!dev)
		goto err_out_res;
	SET_NETDEV_DEV(dev, &pdev->dev);

	np = netdev_priv(dev);

	/* IO registers range. */
	ioaddr = pci_iomap(pdev, 0, 0);
	if (!ioaddr)
		goto err_out_dev;
	/* EEPROM access always goes through BAR 0, even when MEM_MAPPING
	 * switches the general register accesses over to BAR 1 below. */
	np->eeprom_addr = ioaddr;

#ifdef MEM_MAPPING
	/* MM registers range. */
	ioaddr = pci_iomap(pdev, 1, 0);
	if (!ioaddr)
		goto err_out_iounmap;
#endif
	np->ioaddr = ioaddr;
	np->chip_id = chip_idx;
	np->pdev = pdev;
	spin_lock_init (&np->tx_lock);
	spin_lock_init (&np->rx_lock);

	/* Parse manual configuration */
	/* an_enable: 1 = autonegotiate, 2 = autosense, 0 = forced mode from
	 * the speed/full_duplex pair set below. */
	np->an_enable = 1;
	np->tx_coalesce = 1;
	if (card_idx < MAX_UNITS) {
		if (media[card_idx] != NULL) {
			np->an_enable = 0;
			if (strcmp (media[card_idx], "auto") == 0 ||
			    strcmp (media[card_idx], "autosense") == 0 ||
			    strcmp (media[card_idx], "0") == 0 ) {
				np->an_enable = 2;
			} else if (strcmp (media[card_idx], "100mbps_fd") == 0 ||
			    strcmp (media[card_idx], "4") == 0) {
				np->speed = 100;
				np->full_duplex = 1;
			} else if (strcmp (media[card_idx], "100mbps_hd") == 0 ||
				   strcmp (media[card_idx], "3") == 0) {
				np->speed = 100;
				np->full_duplex = 0;
			} else if (strcmp (media[card_idx], "10mbps_fd") == 0 ||
				   strcmp (media[card_idx], "2") == 0) {
				np->speed = 10;
				np->full_duplex = 1;
			} else if (strcmp (media[card_idx], "10mbps_hd") == 0 ||
				   strcmp (media[card_idx], "1") == 0) {
				np->speed = 10;
				np->full_duplex = 0;
			} else if (strcmp (media[card_idx], "1000mbps_fd") == 0 ||
				 strcmp (media[card_idx], "6") == 0) {
				np->speed=1000;
				np->full_duplex=1;
			} else if (strcmp (media[card_idx], "1000mbps_hd") == 0 ||
				 strcmp (media[card_idx], "5") == 0) {
				np->speed = 1000;
				np->full_duplex = 0;
			} else {
				/* Unrecognized string: fall back to autoneg. */
				np->an_enable = 1;
			}
		}
		if (jumbo[card_idx] != 0) {
			np->jumbo = 1;
			dev->mtu = MAX_JUMBO;
		} else {
			np->jumbo = 0;
			/* mtu[] only lowers the MTU; values >= PACKET_SIZE
			 * are ignored. */
			if (mtu[card_idx] > 0 && mtu[card_idx] < PACKET_SIZE)
				dev->mtu = mtu[card_idx];
		}
		np->vlan = (vlan[card_idx] > 0 && vlan[card_idx] < 4096) ?
		    vlan[card_idx] : 0;
		if (rx_coalesce > 0 && rx_timeout > 0) {
			np->rx_coalesce = rx_coalesce;
			np->rx_timeout = rx_timeout;
			np->coalesce = 1;
		}
		np->tx_flow = (tx_flow == 0) ? 0 : 1;
		np->rx_flow = (rx_flow == 0) ? 0 : 1;

		/* NOTE(review): this clamps the module-global tx_coalesce,
		 * so the clamped value carries over to later cards. */
		if (tx_coalesce < 1)
			tx_coalesce = 1;
		else if (tx_coalesce > TX_RING_SIZE-1)
			tx_coalesce = TX_RING_SIZE - 1;
	}
	dev->netdev_ops = &netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->ethtool_ops = &ethtool_ops;
#if 0
	dev->features = NETIF_F_IP_CSUM;
#endif
	/* MTU range: 68 - 1536 or 8000 */
	dev->min_mtu = ETH_MIN_MTU;
	dev->max_mtu = np->jumbo ? MAX_JUMBO : PACKET_SIZE;

	pci_set_drvdata (pdev, dev);

	/* Coherent DMA rings for Tx then Rx descriptors. */
	ring_space = dma_alloc_coherent(&pdev->dev, TX_TOTAL_SIZE, &ring_dma,
					GFP_KERNEL);
	if (!ring_space)
		goto err_out_iounmap;
	np->tx_ring = ring_space;
	np->tx_ring_dma = ring_dma;

	ring_space = dma_alloc_coherent(&pdev->dev, RX_TOTAL_SIZE, &ring_dma,
					GFP_KERNEL);
	if (!ring_space)
		goto err_out_unmap_tx;
	np->rx_ring = ring_space;
	np->rx_ring_dma = ring_dma;

	/* Parse eeprom data */
	parse_eeprom (dev);

	/* Find PHY address */
	err = find_miiphy (dev);
	if (err)
		goto err_out_unmap_rx;

	/* Fiber device? */
	np->phy_media = (dr16(ASICCtrl) & PhyMedia) ? 1 : 0;
	np->link_status = 0;
	/* Set media and reset PHY */
	if (np->phy_media) {
		/* default Auto-Negotiation for fiber devices */
	 	if (np->an_enable == 2) {
			np->an_enable = 1;
		}
	} else {
		/* Auto-Negotiation is mandatory for 1000BASE-T,
		   IEEE 802.3ab Annex 28D page 14 */
		if (np->speed == 1000)
			np->an_enable = 1;
	}

	err = register_netdev (dev);
	if (err)
		goto err_out_unmap_rx;

	card_idx++;

	printk (KERN_INFO "%s: %s, %pM, IRQ %d\n",
		dev->name, np->name, dev->dev_addr, irq);
	if (tx_coalesce > 1)
		printk(KERN_INFO "tx_coalesce:\t%d packets\n",
				tx_coalesce);
	if (np->coalesce)
		printk(KERN_INFO
		       "rx_coalesce:\t%d packets\n"
		       "rx_timeout: \t%d ns\n",
				np->rx_coalesce, np->rx_timeout*640);
	if (np->vlan)
		printk(KERN_INFO "vlan(id):\t%d\n", np->vlan);
	return 0;

err_out_unmap_rx:
	dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, np->rx_ring,
			  np->rx_ring_dma);
err_out_unmap_tx:
	dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, np->tx_ring,
			  np->tx_ring_dma);
err_out_iounmap:
#ifdef MEM_MAPPING
	pci_iounmap(pdev, np->ioaddr);
#endif
	pci_iounmap(pdev, np->eeprom_addr);
err_out_dev:
	free_netdev (dev);
err_out_res:
	pci_release_regions (pdev);
err_out_disable:
	pci_disable_device (pdev);
	return err;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  302) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  303) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  304) find_miiphy (struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  305) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  306) 	struct netdev_private *np = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  307) 	int i, phy_found = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  308) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  309) 	np->phy_addr = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  310) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  311) 	for (i = 31; i >= 0; i--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  312) 		int mii_status = mii_read (dev, i, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  313) 		if (mii_status != 0xffff && mii_status != 0x0000) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  314) 			np->phy_addr = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  315) 			phy_found++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  316) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  317) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  318) 	if (!phy_found) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  319) 		printk (KERN_ERR "%s: No MII PHY found!\n", dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  320) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  321) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  322) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  323) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  324) 
/*
 * Read the 256-byte serial EEPROM, verify its CRC (D-Link boards only),
 * extract the MAC address, and walk the Software Information Block cells
 * for board-specific settings.  Returns 0 on success, -1 on a CRC or
 * cell-format error.
 */
static int
parse_eeprom (struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->ioaddr;	/* needed by dw8/dr8 below */
	int i, j;
	u8 sromdata[256];
	u8 *psib;
	u32 crc;
	/* Overlay the raw bytes with the structured SROM layout. */
	PSROM_t psrom = (PSROM_t) sromdata;

	int cid, next;

	/* The EEPROM is read as 128 little-endian 16-bit words. */
	for (i = 0; i < 128; i++)
		((__le16 *) sromdata)[i] = cpu_to_le16(read_eeprom(np, i));

	if (np->pdev->vendor == PCI_VENDOR_ID_DLINK) {	/* D-Link Only */
		/* Check CRC: covers the first 252 bytes; the last 4 hold
		 * the stored CRC itself. */
		crc = ~ether_crc_le (256 - 4, sromdata);
		if (psrom->crc != cpu_to_le32(crc)) {
			printk (KERN_ERR "%s: EEPROM data CRC error.\n",
					dev->name);
			return -1;
		}
	}

	/* Set MAC address */
	for (i = 0; i < 6; i++)
		dev->dev_addr[i] = psrom->mac_addr[i];

	/* IP1000A has no Software Information Block; only its LED mode
	 * byte is taken from the EEPROM. */
	if (np->chip_id == CHIP_IP1000A) {
		np->led_mode = psrom->led_mode;
		return 0;
	}

	if (np->pdev->vendor != PCI_VENDOR_ID_DLINK) {
		return 0;
	}

	/* Parse Software Information Block */
	/* Cells start at offset 0x30; each is a (cell-id, next-offset)
	 * header followed by payload up to next-offset. */
	i = 0x30;
	psib = (u8 *) sromdata;
	do {
		cid = psib[i++];
		next = psib[i++];
		if ((cid == 0 && next == 0) || (cid == 0xff && next == 0xff)) {
			printk (KERN_ERR "Cell data error\n");
			return -1;
		}
		switch (cid) {
		case 0:	/* Format version */
			break;
		case 1:	/* End of cell */
			return 0;
		case 2:	/* Duplex Polarity */
			np->duplex_polarity = psib[i];
			dw8(PhyCtrl, dr8(PhyCtrl) | psib[i]);
			break;
		case 3:	/* Wake Polarity */
			np->wake_polarity = psib[i];
			break;
		case 9:	/* Adapter description */
			/* NOTE(review): copies up to 255 bytes into np->name;
			 * assumes np->name can hold that — confirm in dl2k.h. */
			j = (next - i > 255) ? 255 : next - i;
			memcpy (np->name, &(psib[i]), j);
			break;
		case 4:
		case 5:
		case 6:
		case 7:
		case 8:	/* Reserved */
			break;
		default:	/* Unknown cell */
			return -1;
		}
		i = next;
	} while (1);

	/* Unreachable: the loop above only exits via return. */
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  404) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  405) static void rio_set_led_mode(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  406) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  407) 	struct netdev_private *np = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  408) 	void __iomem *ioaddr = np->ioaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  409) 	u32 mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  410) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  411) 	if (np->chip_id != CHIP_IP1000A)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  412) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  413) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  414) 	mode = dr32(ASICCtrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  415) 	mode &= ~(IPG_AC_LED_MODE_BIT_1 | IPG_AC_LED_MODE | IPG_AC_LED_SPEED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  416) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  417) 	if (np->led_mode & 0x01)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  418) 		mode |= IPG_AC_LED_MODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  419) 	if (np->led_mode & 0x02)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  420) 		mode |= IPG_AC_LED_MODE_BIT_1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  421) 	if (np->led_mode & 0x08)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  422) 		mode |= IPG_AC_LED_SPEED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  423) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  424) 	dw32(ASICCtrl, mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  425) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  426) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  427) static inline dma_addr_t desc_to_dma(struct netdev_desc *desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  428) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  429) 	return le64_to_cpu(desc->fraginfo) & DMA_BIT_MASK(48);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  430) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  431) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  432) static void free_list(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  433) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  434) 	struct netdev_private *np = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  435) 	struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  436) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  437) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  438) 	/* Free all the skbuffs in the queue. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  439) 	for (i = 0; i < RX_RING_SIZE; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  440) 		skb = np->rx_skbuff[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  441) 		if (skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  442) 			dma_unmap_single(&np->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  443) 					 desc_to_dma(&np->rx_ring[i]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  444) 					 skb->len, DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  445) 			dev_kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  446) 			np->rx_skbuff[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  447) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  448) 		np->rx_ring[i].status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  449) 		np->rx_ring[i].fraginfo = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  450) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  451) 	for (i = 0; i < TX_RING_SIZE; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  452) 		skb = np->tx_skbuff[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  453) 		if (skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  454) 			dma_unmap_single(&np->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  455) 					 desc_to_dma(&np->tx_ring[i]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  456) 					 skb->len, DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  457) 			dev_kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  458) 			np->tx_skbuff[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  459) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  460) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  461) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  462) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  463) static void rio_reset_ring(struct netdev_private *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  464) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  465) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  466) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  467) 	np->cur_rx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  468) 	np->cur_tx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  469) 	np->old_rx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  470) 	np->old_tx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  471) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  472) 	for (i = 0; i < TX_RING_SIZE; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  473) 		np->tx_ring[i].status = cpu_to_le64(TFDDone);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  474) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  475) 	for (i = 0; i < RX_RING_SIZE; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  476) 		np->rx_ring[i].status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  477) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  478) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  479)  /* allocate and initialize Tx and Rx descriptors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  480) static int alloc_list(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  481) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  482) 	struct netdev_private *np = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  483) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  484) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  485) 	rio_reset_ring(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  486) 	np->rx_buf_sz = (dev->mtu <= 1500 ? PACKET_SIZE : dev->mtu + 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  487) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  488) 	/* Initialize Tx descriptors, TFDListPtr leaves in start_xmit(). */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  489) 	for (i = 0; i < TX_RING_SIZE; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  490) 		np->tx_skbuff[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  491) 		np->tx_ring[i].next_desc = cpu_to_le64(np->tx_ring_dma +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  492) 					      ((i + 1) % TX_RING_SIZE) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  493) 					      sizeof(struct netdev_desc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  494) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  495) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  496) 	/* Initialize Rx descriptors & allocate buffers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  497) 	for (i = 0; i < RX_RING_SIZE; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  498) 		/* Allocated fixed size of skbuff */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  499) 		struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  500) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  501) 		skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  502) 		np->rx_skbuff[i] = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  503) 		if (!skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  504) 			free_list(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  505) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  506) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  507) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  508) 		np->rx_ring[i].next_desc = cpu_to_le64(np->rx_ring_dma +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  509) 						((i + 1) % RX_RING_SIZE) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  510) 						sizeof(struct netdev_desc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  511) 		/* Rubicon now supports 40 bits of addressing space. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  512) 		np->rx_ring[i].fraginfo =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  513) 		    cpu_to_le64(dma_map_single(&np->pdev->dev, skb->data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  514) 					       np->rx_buf_sz, DMA_FROM_DEVICE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  515) 		np->rx_ring[i].fraginfo |= cpu_to_le64((u64)np->rx_buf_sz << 48);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  516) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  517) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  518) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  519) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  520) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  521) static void rio_hw_init(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  522) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  523) 	struct netdev_private *np = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  524) 	void __iomem *ioaddr = np->ioaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  525) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  526) 	u16 macctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  527) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  528) 	/* Reset all logic functions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  529) 	dw16(ASICCtrl + 2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  530) 	     GlobalReset | DMAReset | FIFOReset | NetworkReset | HostReset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  531) 	mdelay(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  532) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  533) 	rio_set_led_mode(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  534) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  535) 	/* DebugCtrl bit 4, 5, 9 must set */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  536) 	dw32(DebugCtrl, dr32(DebugCtrl) | 0x0230);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  537) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  538) 	if (np->chip_id == CHIP_IP1000A &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  539) 	    (np->pdev->revision == 0x40 || np->pdev->revision == 0x41)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  540) 		/* PHY magic taken from ipg driver, undocumented registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  541) 		mii_write(dev, np->phy_addr, 31, 0x0001);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  542) 		mii_write(dev, np->phy_addr, 27, 0x01e0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  543) 		mii_write(dev, np->phy_addr, 31, 0x0002);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  544) 		mii_write(dev, np->phy_addr, 27, 0xeb8e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  545) 		mii_write(dev, np->phy_addr, 31, 0x0000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  546) 		mii_write(dev, np->phy_addr, 30, 0x005e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  547) 		/* advertise 1000BASE-T half & full duplex, prefer MASTER */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  548) 		mii_write(dev, np->phy_addr, MII_CTRL1000, 0x0700);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  549) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  550) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  551) 	if (np->phy_media)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  552) 		mii_set_media_pcs(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  553) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  554) 		mii_set_media(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  555) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  556) 	/* Jumbo frame */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  557) 	if (np->jumbo != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  558) 		dw16(MaxFrameSize, MAX_JUMBO+14);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  559) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  560) 	/* Set RFDListPtr */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  561) 	dw32(RFDListPtr0, np->rx_ring_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  562) 	dw32(RFDListPtr1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  563) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  564) 	/* Set station address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  565) 	/* 16 or 32-bit access is required by TC9020 datasheet but 8-bit works
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  566) 	 * too. However, it doesn't work on IP1000A so we use 16-bit access.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  567) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  568) 	for (i = 0; i < 3; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  569) 		dw16(StationAddr0 + 2 * i,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  570) 		     cpu_to_le16(((u16 *)dev->dev_addr)[i]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  571) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  572) 	set_multicast (dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  573) 	if (np->coalesce) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  574) 		dw32(RxDMAIntCtrl, np->rx_coalesce | np->rx_timeout << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  575) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  576) 	/* Set RIO to poll every N*320nsec. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  577) 	dw8(RxDMAPollPeriod, 0x20);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  578) 	dw8(TxDMAPollPeriod, 0xff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  579) 	dw8(RxDMABurstThresh, 0x30);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  580) 	dw8(RxDMAUrgentThresh, 0x30);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  581) 	dw32(RmonStatMask, 0x0007ffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  582) 	/* clear statistics */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  583) 	clear_stats (dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  584) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  585) 	/* VLAN supported */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  586) 	if (np->vlan) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  587) 		/* priority field in RxDMAIntCtrl  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  588) 		dw32(RxDMAIntCtrl, dr32(RxDMAIntCtrl) | 0x7 << 10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  589) 		/* VLANId */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  590) 		dw16(VLANId, np->vlan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  591) 		/* Length/Type should be 0x8100 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  592) 		dw32(VLANTag, 0x8100 << 16 | np->vlan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  593) 		/* Enable AutoVLANuntagging, but disable AutoVLANtagging.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  594) 		   VLAN information tagged by TFC' VID, CFI fields. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  595) 		dw32(MACCtrl, dr32(MACCtrl) | AutoVLANuntagging);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  596) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  597) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  598) 	/* Start Tx/Rx */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  599) 	dw32(MACCtrl, dr32(MACCtrl) | StatsEnable | RxEnable | TxEnable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  600) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  601) 	macctrl = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  602) 	macctrl |= (np->vlan) ? AutoVLANuntagging : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  603) 	macctrl |= (np->full_duplex) ? DuplexSelect : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  604) 	macctrl |= (np->tx_flow) ? TxFlowControlEnable : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  605) 	macctrl |= (np->rx_flow) ? RxFlowControlEnable : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  606) 	dw16(MACCtrl, macctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  607) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  608) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  609) static void rio_hw_stop(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  610) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  611) 	struct netdev_private *np = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  612) 	void __iomem *ioaddr = np->ioaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  613) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  614) 	/* Disable interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  615) 	dw16(IntEnable, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  616) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  617) 	/* Stop Tx and Rx logics */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  618) 	dw32(MACCtrl, TxDisable | RxDisable | StatsDisable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  619) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  620) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  621) static int rio_open(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  622) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  623) 	struct netdev_private *np = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  624) 	const int irq = np->pdev->irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  625) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  626) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  627) 	i = alloc_list(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  628) 	if (i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  629) 		return i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  630) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  631) 	rio_hw_init(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  632) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  633) 	i = request_irq(irq, rio_interrupt, IRQF_SHARED, dev->name, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  634) 	if (i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  635) 		rio_hw_stop(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  636) 		free_list(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  637) 		return i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  638) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  639) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  640) 	timer_setup(&np->timer, rio_timer, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  641) 	np->timer.expires = jiffies + 1 * HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  642) 	add_timer(&np->timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  643) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  644) 	netif_start_queue (dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  645) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  646) 	dl2k_enable_int(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  647) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  648) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  649) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  650) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  651) rio_timer (struct timer_list *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  652) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  653) 	struct netdev_private *np = from_timer(np, t, timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  654) 	struct net_device *dev = pci_get_drvdata(np->pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  655) 	unsigned int entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  656) 	int next_tick = 1*HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  657) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  658) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  659) 	spin_lock_irqsave(&np->rx_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  660) 	/* Recover rx ring exhausted error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  661) 	if (np->cur_rx - np->old_rx >= RX_RING_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  662) 		printk(KERN_INFO "Try to recover rx ring exhausted...\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  663) 		/* Re-allocate skbuffs to fill the descriptor ring */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  664) 		for (; np->cur_rx - np->old_rx > 0; np->old_rx++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  665) 			struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  666) 			entry = np->old_rx % RX_RING_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  667) 			/* Dropped packets don't need to re-allocate */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  668) 			if (np->rx_skbuff[entry] == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  669) 				skb = netdev_alloc_skb_ip_align(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  670) 								np->rx_buf_sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  671) 				if (skb == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  672) 					np->rx_ring[entry].fraginfo = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  673) 					printk (KERN_INFO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  674) 						"%s: Still unable to re-allocate Rx skbuff.#%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  675) 						dev->name, entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  676) 					break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  677) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  678) 				np->rx_skbuff[entry] = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  679) 				np->rx_ring[entry].fraginfo =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  680) 				    cpu_to_le64 (dma_map_single(&np->pdev->dev, skb->data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  681) 								np->rx_buf_sz, DMA_FROM_DEVICE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  682) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  683) 			np->rx_ring[entry].fraginfo |=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  684) 			    cpu_to_le64((u64)np->rx_buf_sz << 48);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  685) 			np->rx_ring[entry].status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  686) 		} /* end for */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  687) 	} /* end if */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  688) 	spin_unlock_irqrestore (&np->rx_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  689) 	np->timer.expires = jiffies + next_tick;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  690) 	add_timer(&np->timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  691) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  692) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  693) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  694) rio_tx_timeout (struct net_device *dev, unsigned int txqueue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  695) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  696) 	struct netdev_private *np = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  697) 	void __iomem *ioaddr = np->ioaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  698) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  699) 	printk (KERN_INFO "%s: Tx timed out (%4.4x), is buffer full?\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  700) 		dev->name, dr32(TxStatus));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  701) 	rio_free_tx(dev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  702) 	dev->if_port = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  703) 	netif_trans_update(dev); /* prevent tx timeout */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  704) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  705) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  706) static netdev_tx_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  707) start_xmit (struct sk_buff *skb, struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  708) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709) 	struct netdev_private *np = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  710) 	void __iomem *ioaddr = np->ioaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711) 	struct netdev_desc *txdesc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712) 	unsigned entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713) 	u64 tfc_vlan_tag = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715) 	if (np->link_status == 0) {	/* Link Down */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716) 		dev_kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) 		return NETDEV_TX_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) 	entry = np->cur_tx % TX_RING_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720) 	np->tx_skbuff[entry] = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) 	txdesc = &np->tx_ring[entry];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) #if 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) 		txdesc->status |=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) 		    cpu_to_le64 (TCPChecksumEnable | UDPChecksumEnable |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) 				 IPChecksumEnable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) 	if (np->vlan) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) 		tfc_vlan_tag = VLANTagInsert |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) 		    ((u64)np->vlan << 32) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) 		    ((u64)skb->priority << 45);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 	txdesc->fraginfo = cpu_to_le64 (dma_map_single(&np->pdev->dev, skb->data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 						       skb->len, DMA_TO_DEVICE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 	txdesc->fraginfo |= cpu_to_le64((u64)skb->len << 48);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 	/* DL2K bug: DMA fails to get next descriptor ptr in 10Mbps mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) 	 * Work around: Always use 1 descriptor in 10Mbps mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) 	if (entry % np->tx_coalesce == 0 || np->speed == 10)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 		txdesc->status = cpu_to_le64 (entry | tfc_vlan_tag |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 					      WordAlignDisable |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 					      TxDMAIndicate |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 					      (1 << FragCountShift));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 		txdesc->status = cpu_to_le64 (entry | tfc_vlan_tag |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 					      WordAlignDisable |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) 					      (1 << FragCountShift));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 	/* TxDMAPollNow */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 	dw32(DMACtrl, dr32(DMACtrl) | 0x00001000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 	/* Schedule ISR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 	dw32(CountDown, 10000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 	np->cur_tx = (np->cur_tx + 1) % TX_RING_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 	if ((np->cur_tx - np->old_tx + TX_RING_SIZE) % TX_RING_SIZE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 			< TX_QUEUE_LEN - 1 && np->speed != 10) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 		/* do nothing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 	} else if (!netif_queue_stopped(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 		netif_stop_queue (dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 	/* The first TFDListPtr */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 	if (!dr32(TFDListPtr0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 		dw32(TFDListPtr0, np->tx_ring_dma +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 		     entry * sizeof (struct netdev_desc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 		dw32(TFDListPtr1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 	return NETDEV_TX_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) static irqreturn_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) rio_interrupt (int irq, void *dev_instance)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 	struct net_device *dev = dev_instance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 	struct netdev_private *np = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 	void __iomem *ioaddr = np->ioaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 	unsigned int_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 	int cnt = max_intrloop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 	int handled = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 	while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 		int_status = dr16(IntStatus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 		dw16(IntStatus, int_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 		int_status &= DEFAULT_INTR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 		if (int_status == 0 || --cnt < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 		handled = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 		/* Processing received packets */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 		if (int_status & RxDMAComplete)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 			receive_packet (dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 		/* TxDMAComplete interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 		if ((int_status & (TxDMAComplete|IntRequested))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 			int tx_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 			tx_status = dr32(TxStatus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 			if (tx_status & 0x01)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 				tx_error (dev, tx_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 			/* Free used tx skbuffs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 			rio_free_tx (dev, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 		/* Handle uncommon events */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 		if (int_status &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 		    (HostError | LinkEvent | UpdateStats))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 			rio_error (dev, int_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 	if (np->cur_tx != np->old_tx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 		dw32(CountDown, 100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 	return IRQ_RETVAL(handled);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) rio_free_tx (struct net_device *dev, int irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 	struct netdev_private *np = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 	int entry = np->old_tx % TX_RING_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 	int tx_use = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 	unsigned long flag = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 	if (irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 		spin_lock(&np->tx_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 		spin_lock_irqsave(&np->tx_lock, flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 	/* Free used tx skbuffs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 	while (entry != np->cur_tx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 		struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 		if (!(np->tx_ring[entry].status & cpu_to_le64(TFDDone)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 		skb = np->tx_skbuff[entry];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 		dma_unmap_single(&np->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 				 desc_to_dma(&np->tx_ring[entry]), skb->len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 				 DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 		if (irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 			dev_consume_skb_irq(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 			dev_kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 		np->tx_skbuff[entry] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 		entry = (entry + 1) % TX_RING_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 		tx_use++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 	if (irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 		spin_unlock(&np->tx_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 		spin_unlock_irqrestore(&np->tx_lock, flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 	np->old_tx = entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 	/* If the ring is no longer full, clear tx_full and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 	   call netif_wake_queue() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 	if (netif_queue_stopped(dev) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 	    ((np->cur_tx - np->old_tx + TX_RING_SIZE) % TX_RING_SIZE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 	    < TX_QUEUE_LEN - 1 || np->speed == 10)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 		netif_wake_queue (dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) tx_error (struct net_device *dev, int tx_status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 	struct netdev_private *np = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 	void __iomem *ioaddr = np->ioaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 	int frame_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 	frame_id = (tx_status & 0xffff0000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 	printk (KERN_ERR "%s: Transmit error, TxStatus %4.4x, FrameId %d.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 		dev->name, tx_status, frame_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 	dev->stats.tx_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 	/* Ttransmit Underrun */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 	if (tx_status & 0x10) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 		dev->stats.tx_fifo_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 		dw16(TxStartThresh, dr16(TxStartThresh) + 0x10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 		/* Transmit Underrun need to set TxReset, DMARest, FIFOReset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 		dw16(ASICCtrl + 2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 		     TxReset | DMAReset | FIFOReset | NetworkReset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 		/* Wait for ResetBusy bit clear */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 		for (i = 50; i > 0; i--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 			if (!(dr16(ASICCtrl + 2) & ResetBusy))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 			mdelay (1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 		rio_set_led_mode(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 		rio_free_tx (dev, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 		/* Reset TFDListPtr */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 		dw32(TFDListPtr0, np->tx_ring_dma +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 		     np->old_tx * sizeof (struct netdev_desc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 		dw32(TFDListPtr1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 		/* Let TxStartThresh stay default value */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 	/* Late Collision */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 	if (tx_status & 0x04) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 		dev->stats.tx_fifo_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 		/* TxReset and clear FIFO */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 		dw16(ASICCtrl + 2, TxReset | FIFOReset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 		/* Wait reset done */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 		for (i = 50; i > 0; i--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 			if (!(dr16(ASICCtrl + 2) & ResetBusy))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 			mdelay (1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 		rio_set_led_mode(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 		/* Let TxStartThresh stay default value */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 	/* Maximum Collisions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 	if (tx_status & 0x08)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 		dev->stats.collisions++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 	/* Restart the Tx */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 	dw32(MACCtrl, dr16(MACCtrl) | TxEnable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) receive_packet (struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 	struct netdev_private *np = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 	int entry = np->cur_rx % RX_RING_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 	int cnt = 30;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 	/* If RFDDone, FrameStart and FrameEnd set, there is a new packet in. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 	while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 		struct netdev_desc *desc = &np->rx_ring[entry];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 		int pkt_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 		u64 frame_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 		if (!(desc->status & cpu_to_le64(RFDDone)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 		    !(desc->status & cpu_to_le64(FrameStart)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 		    !(desc->status & cpu_to_le64(FrameEnd)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 		/* Chip omits the CRC. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 		frame_status = le64_to_cpu(desc->status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 		pkt_len = frame_status & 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 		if (--cnt < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 		/* Update rx error statistics, drop packet. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 		if (frame_status & RFS_Errors) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 			dev->stats.rx_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 			if (frame_status & (RxRuntFrame | RxLengthError))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 				dev->stats.rx_length_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 			if (frame_status & RxFCSError)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 				dev->stats.rx_crc_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 			if (frame_status & RxAlignmentError && np->speed != 1000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 				dev->stats.rx_frame_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 			if (frame_status & RxFIFOOverrun)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 				dev->stats.rx_fifo_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 			struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 			/* Small skbuffs for short packets */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 			if (pkt_len > copy_thresh) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 				dma_unmap_single(&np->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 						 desc_to_dma(desc),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 						 np->rx_buf_sz,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 						 DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 				skb_put (skb = np->rx_skbuff[entry], pkt_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 				np->rx_skbuff[entry] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 			} else if ((skb = netdev_alloc_skb_ip_align(dev, pkt_len))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 				dma_sync_single_for_cpu(&np->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 							desc_to_dma(desc),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 							np->rx_buf_sz,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 							DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 				skb_copy_to_linear_data (skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 						  np->rx_skbuff[entry]->data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 						  pkt_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 				skb_put (skb, pkt_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 				dma_sync_single_for_device(&np->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 							   desc_to_dma(desc),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 							   np->rx_buf_sz,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 							   DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 			skb->protocol = eth_type_trans (skb, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) #if 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 			/* Checksum done by hw, but csum value unavailable. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 			if (np->pdev->pci_rev_id >= 0x0c &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 				!(frame_status & (TCPError | UDPError | IPError))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 				skb->ip_summed = CHECKSUM_UNNECESSARY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 			netif_rx (skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 		entry = (entry + 1) % RX_RING_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 	spin_lock(&np->rx_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 	np->cur_rx = entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 	/* Re-allocate skbuffs to fill the descriptor ring */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 	entry = np->old_rx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 	while (entry != np->cur_rx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 		struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 		/* Dropped packets don't need to re-allocate */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 		if (np->rx_skbuff[entry] == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 			skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 			if (skb == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 				np->rx_ring[entry].fraginfo = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 				printk (KERN_INFO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 					"%s: receive_packet: "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 					"Unable to re-allocate Rx skbuff.#%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 					dev->name, entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 			np->rx_skbuff[entry] = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 			np->rx_ring[entry].fraginfo =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 			    cpu_to_le64(dma_map_single(&np->pdev->dev, skb->data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 						       np->rx_buf_sz, DMA_FROM_DEVICE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 		np->rx_ring[entry].fraginfo |=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 		    cpu_to_le64((u64)np->rx_buf_sz << 48);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 		np->rx_ring[entry].status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 		entry = (entry + 1) % RX_RING_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 	np->old_rx = entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 	spin_unlock(&np->rx_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) rio_error (struct net_device *dev, int int_status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 	struct netdev_private *np = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 	void __iomem *ioaddr = np->ioaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 	u16 macctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 	/* Link change event */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 	if (int_status & LinkEvent) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 		if (mii_wait_link (dev, 10) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 			printk (KERN_INFO "%s: Link up\n", dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 			if (np->phy_media)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 				mii_get_media_pcs (dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 				mii_get_media (dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 			if (np->speed == 1000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 				np->tx_coalesce = tx_coalesce;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 				np->tx_coalesce = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 			macctrl = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 			macctrl |= (np->vlan) ? AutoVLANuntagging : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 			macctrl |= (np->full_duplex) ? DuplexSelect : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 			macctrl |= (np->tx_flow) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 				TxFlowControlEnable : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 			macctrl |= (np->rx_flow) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 				RxFlowControlEnable : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 			dw16(MACCtrl, macctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 			np->link_status = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 			netif_carrier_on(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 			printk (KERN_INFO "%s: Link off\n", dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 			np->link_status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 			netif_carrier_off(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 	/* UpdateStats statistics registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 	if (int_status & UpdateStats) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 		get_stats (dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 	/* PCI Error, a catastronphic error related to the bus interface
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 	   occurs, set GlobalReset and HostReset to reset. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 	if (int_status & HostError) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 		printk (KERN_ERR "%s: HostError! IntStatus %4.4x.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 			dev->name, int_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 		dw16(ASICCtrl + 2, GlobalReset | HostReset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 		mdelay (500);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 		rio_set_led_mode(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) static struct net_device_stats *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) get_stats (struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 	struct netdev_private *np = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 	void __iomem *ioaddr = np->ioaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) #ifdef MEM_MAPPING
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 	unsigned int stat_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 	/* All statistics registers need to be acknowledged,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 	   else statistic overflow could cause problems */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 	dev->stats.rx_packets += dr32(FramesRcvOk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 	dev->stats.tx_packets += dr32(FramesXmtOk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 	dev->stats.rx_bytes += dr32(OctetRcvOk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 	dev->stats.tx_bytes += dr32(OctetXmtOk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 	dev->stats.multicast = dr32(McstFramesRcvdOk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 	dev->stats.collisions += dr32(SingleColFrames)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 			     +  dr32(MultiColFrames);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 	/* detailed tx errors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 	stat_reg = dr16(FramesAbortXSColls);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 	dev->stats.tx_aborted_errors += stat_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 	dev->stats.tx_errors += stat_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 	stat_reg = dr16(CarrierSenseErrors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 	dev->stats.tx_carrier_errors += stat_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 	dev->stats.tx_errors += stat_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 	/* Clear all other statistic register. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 	dr32(McstOctetXmtOk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 	dr16(BcstFramesXmtdOk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 	dr32(McstFramesXmtdOk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 	dr16(BcstFramesRcvdOk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 	dr16(MacControlFramesRcvd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 	dr16(FrameTooLongErrors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 	dr16(InRangeLengthErrors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 	dr16(FramesCheckSeqErrors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 	dr16(FramesLostRxErrors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 	dr32(McstOctetXmtOk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 	dr32(BcstOctetXmtOk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 	dr32(McstFramesXmtdOk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 	dr32(FramesWDeferredXmt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 	dr32(LateCollisions);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 	dr16(BcstFramesXmtdOk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 	dr16(MacControlFramesXmtd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 	dr16(FramesWEXDeferal);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) #ifdef MEM_MAPPING
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 	for (i = 0x100; i <= 0x150; i += 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 		dr32(i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 	dr16(TxJumboFrames);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 	dr16(RxJumboFrames);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 	dr16(TCPCheckSumErrors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 	dr16(UDPCheckSumErrors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 	dr16(IPCheckSumErrors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 	return &dev->stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) clear_stats (struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 	struct netdev_private *np = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 	void __iomem *ioaddr = np->ioaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) #ifdef MEM_MAPPING
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 	/* All statistics registers need to be acknowledged,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 	   else statistic overflow could cause problems */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 	dr32(FramesRcvOk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 	dr32(FramesXmtOk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 	dr32(OctetRcvOk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 	dr32(OctetXmtOk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 	dr32(McstFramesRcvdOk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 	dr32(SingleColFrames);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 	dr32(MultiColFrames);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 	dr32(LateCollisions);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 	/* detailed rx errors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 	dr16(FrameTooLongErrors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 	dr16(InRangeLengthErrors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 	dr16(FramesCheckSeqErrors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 	dr16(FramesLostRxErrors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 	/* detailed tx errors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 	dr16(FramesAbortXSColls);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 	dr16(CarrierSenseErrors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 	/* Clear all other statistic register. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 	dr32(McstOctetXmtOk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 	dr16(BcstFramesXmtdOk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 	dr32(McstFramesXmtdOk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 	dr16(BcstFramesRcvdOk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 	dr16(MacControlFramesRcvd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 	dr32(McstOctetXmtOk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 	dr32(BcstOctetXmtOk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 	dr32(McstFramesXmtdOk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 	dr32(FramesWDeferredXmt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 	dr16(BcstFramesXmtdOk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 	dr16(MacControlFramesXmtd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 	dr16(FramesWEXDeferal);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) #ifdef MEM_MAPPING
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 	for (i = 0x100; i <= 0x150; i += 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 		dr32(i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 	dr16(TxJumboFrames);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 	dr16(RxJumboFrames);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 	dr16(TCPCheckSumErrors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 	dr16(UDPCheckSumErrors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 	dr16(IPCheckSumErrors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) set_multicast (struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 	struct netdev_private *np = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 	void __iomem *ioaddr = np->ioaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 	u32 hash_table[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 	u16 rx_mode = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 	hash_table[0] = hash_table[1] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 	/* RxFlowcontrol DA: 01-80-C2-00-00-01. Hash index=0x39 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 	hash_table[1] |= 0x02000000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 	if (dev->flags & IFF_PROMISC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 		/* Receive all frames promiscuously. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 		rx_mode = ReceiveAllFrames;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 	} else if ((dev->flags & IFF_ALLMULTI) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 			(netdev_mc_count(dev) > multicast_filter_limit)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 		/* Receive broadcast and multicast frames */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 		rx_mode = ReceiveBroadcast | ReceiveMulticast | ReceiveUnicast;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 	} else if (!netdev_mc_empty(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 		struct netdev_hw_addr *ha;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 		/* Receive broadcast frames and multicast frames filtering
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 		   by Hashtable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 		rx_mode =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 		    ReceiveBroadcast | ReceiveMulticastHash | ReceiveUnicast;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 		netdev_for_each_mc_addr(ha, dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 			int bit, index = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 			int crc = ether_crc_le(ETH_ALEN, ha->addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 			/* The inverted high significant 6 bits of CRC are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 			   used as an index to hashtable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 			for (bit = 0; bit < 6; bit++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 				if (crc & (1 << (31 - bit)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 					index |= (1 << bit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 			hash_table[index / 32] |= (1 << (index % 32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 		rx_mode = ReceiveBroadcast | ReceiveUnicast;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 	if (np->vlan) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 		/* ReceiveVLANMatch field in ReceiveMode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 		rx_mode |= ReceiveVLANMatch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 	dw32(HashTable0, hash_table[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 	dw32(HashTable1, hash_table[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 	dw16(ReceiveMode, rx_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) static void rio_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 	struct netdev_private *np = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 	strlcpy(info->driver, "dl2k", sizeof(info->driver));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 	strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) static int rio_get_link_ksettings(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 				  struct ethtool_link_ksettings *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 	struct netdev_private *np = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 	u32 supported, advertising;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 	if (np->phy_media) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 		/* fiber device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 		supported = SUPPORTED_Autoneg | SUPPORTED_FIBRE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 		advertising = ADVERTISED_Autoneg | ADVERTISED_FIBRE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 		cmd->base.port = PORT_FIBRE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 		/* copper device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 		supported = SUPPORTED_10baseT_Half |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 			SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 			| SUPPORTED_100baseT_Full | SUPPORTED_1000baseT_Full |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 			SUPPORTED_Autoneg | SUPPORTED_MII;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 		advertising = ADVERTISED_10baseT_Half |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 			ADVERTISED_10baseT_Full | ADVERTISED_100baseT_Half |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 			ADVERTISED_100baseT_Full | ADVERTISED_1000baseT_Full |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 			ADVERTISED_Autoneg | ADVERTISED_MII;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 		cmd->base.port = PORT_MII;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 	if (np->link_status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 		cmd->base.speed = np->speed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 		cmd->base.duplex = np->full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 		cmd->base.speed = SPEED_UNKNOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 		cmd->base.duplex = DUPLEX_UNKNOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 	if (np->an_enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 		cmd->base.autoneg = AUTONEG_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 		cmd->base.autoneg = AUTONEG_DISABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 	cmd->base.phy_address = np->phy_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 						supported);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 						advertising);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) static int rio_set_link_ksettings(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 				  const struct ethtool_link_ksettings *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 	struct netdev_private *np = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 	u32 speed = cmd->base.speed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 	u8 duplex = cmd->base.duplex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 	netif_carrier_off(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 	if (cmd->base.autoneg == AUTONEG_ENABLE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 		if (np->an_enable) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 			np->an_enable = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 			mii_set_media(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 		np->an_enable = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 		if (np->speed == 1000) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 			speed = SPEED_100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 			duplex = DUPLEX_FULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 			printk("Warning!! Can't disable Auto negotiation in 1000Mbps, change to Manual 100Mbps, Full duplex.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 		switch (speed) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 		case SPEED_10:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 			np->speed = 10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 			np->full_duplex = (duplex == DUPLEX_FULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 		case SPEED_100:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 			np->speed = 100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 			np->full_duplex = (duplex == DUPLEX_FULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 		case SPEED_1000: /* not supported */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 		mii_set_media(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) static u32 rio_get_link(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 	struct netdev_private *np = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 	return np->link_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) static const struct ethtool_ops ethtool_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 	.get_drvinfo = rio_get_drvinfo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 	.get_link = rio_get_link,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 	.get_link_ksettings = rio_get_link_ksettings,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 	.set_link_ksettings = rio_set_link_ksettings,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) rio_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 	int phy_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 	struct netdev_private *np = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 	struct mii_ioctl_data *miidata = if_mii(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 	phy_addr = np->phy_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 	switch (cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 	case SIOCGMIIPHY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 		miidata->phy_id = phy_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 	case SIOCGMIIREG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 		miidata->val_out = mii_read (dev, phy_addr, miidata->reg_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 	case SIOCSMIIREG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 		if (!capable(CAP_NET_ADMIN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 			return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 		mii_write (dev, phy_addr, miidata->reg_num, miidata->val_in);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 		return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) #define EEP_READ 0x0200
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) #define EEP_BUSY 0x8000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) /* Read the EEPROM word */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) /* We use I/O instruction to read/write eeprom to avoid fail on some machines */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) static int read_eeprom(struct netdev_private *np, int eep_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 	void __iomem *ioaddr = np->eeprom_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 	int i = 1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 	dw16(EepromCtrl, EEP_READ | (eep_addr & 0xff));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 	while (i-- > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 		if (!(dr16(EepromCtrl) & EEP_BUSY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 			return dr16(EepromData);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) enum phy_ctrl_bits {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 	MII_READ = 0x00, MII_CLK = 0x01, MII_DATA1 = 0x02, MII_WRITE = 0x04,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 	MII_DUPLEX = 0x08,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) #define mii_delay() dr8(PhyCtrl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) mii_sendbit (struct net_device *dev, u32 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 	struct netdev_private *np = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 	void __iomem *ioaddr = np->ioaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 	data = ((data) ? MII_DATA1 : 0) | (dr8(PhyCtrl) & 0xf8) | MII_WRITE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 	dw8(PhyCtrl, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 	mii_delay ();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 	dw8(PhyCtrl, data | MII_CLK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 	mii_delay ();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) mii_getbit (struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 	struct netdev_private *np = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 	void __iomem *ioaddr = np->ioaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 	u8 data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) 	data = (dr8(PhyCtrl) & 0xf8) | MII_READ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 	dw8(PhyCtrl, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 	mii_delay ();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 	dw8(PhyCtrl, data | MII_CLK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 	mii_delay ();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 	return (dr8(PhyCtrl) >> 1) & 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) mii_send_bits (struct net_device *dev, u32 data, int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) 	for (i = len - 1; i >= 0; i--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 		mii_sendbit (dev, data & (1 << i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) mii_read (struct net_device *dev, int phy_addr, int reg_num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 	u32 cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 	u32 retval = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 	/* Preamble */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 	mii_send_bits (dev, 0xffffffff, 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 	/* ST(2), OP(2), ADDR(5), REG#(5), TA(2), Data(16) total 32 bits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 	/* ST,OP = 0110'b for read operation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) 	cmd = (0x06 << 10 | phy_addr << 5 | reg_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) 	mii_send_bits (dev, cmd, 14);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 	/* Turnaround */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 	if (mii_getbit (dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 		goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) 	/* Read data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) 	for (i = 0; i < 16; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 		retval |= mii_getbit (dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 		retval <<= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 	/* End cycle */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) 	mii_getbit (dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 	return (retval >> 1) & 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454)       err_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) mii_write (struct net_device *dev, int phy_addr, int reg_num, u16 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) 	u32 cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 	/* Preamble */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 	mii_send_bits (dev, 0xffffffff, 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 	/* ST(2), OP(2), ADDR(5), REG#(5), TA(2), Data(16) total 32 bits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 	/* ST,OP,AAAAA,RRRRR,TA = 0101xxxxxxxxxx10'b = 0x5002 for write */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) 	cmd = (0x5002 << 16) | (phy_addr << 23) | (reg_num << 18) | data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 	mii_send_bits (dev, cmd, 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 	/* End cycle */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) 	mii_getbit (dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) mii_wait_link (struct net_device *dev, int wait)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 	__u16 bmsr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) 	int phy_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 	struct netdev_private *np;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) 	np = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 	phy_addr = np->phy_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) 		bmsr = mii_read (dev, phy_addr, MII_BMSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) 		if (bmsr & BMSR_LSTATUS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 		mdelay (1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) 	} while (--wait > 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) 	return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) mii_get_media (struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 	__u16 negotiate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 	__u16 bmsr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 	__u16 mscr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) 	__u16 mssr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 	int phy_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) 	struct netdev_private *np;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 	np = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) 	phy_addr = np->phy_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) 	bmsr = mii_read (dev, phy_addr, MII_BMSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 	if (np->an_enable) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) 		if (!(bmsr & BMSR_ANEGCOMPLETE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) 			/* Auto-Negotiation not completed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) 			return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 		negotiate = mii_read (dev, phy_addr, MII_ADVERTISE) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) 			mii_read (dev, phy_addr, MII_LPA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 		mscr = mii_read (dev, phy_addr, MII_CTRL1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 		mssr = mii_read (dev, phy_addr, MII_STAT1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) 		if (mscr & ADVERTISE_1000FULL && mssr & LPA_1000FULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 			np->speed = 1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) 			np->full_duplex = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) 			printk (KERN_INFO "Auto 1000 Mbps, Full duplex\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) 		} else if (mscr & ADVERTISE_1000HALF && mssr & LPA_1000HALF) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) 			np->speed = 1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) 			np->full_duplex = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) 			printk (KERN_INFO "Auto 1000 Mbps, Half duplex\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) 		} else if (negotiate & ADVERTISE_100FULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) 			np->speed = 100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) 			np->full_duplex = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) 			printk (KERN_INFO "Auto 100 Mbps, Full duplex\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) 		} else if (negotiate & ADVERTISE_100HALF) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) 			np->speed = 100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) 			np->full_duplex = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) 			printk (KERN_INFO "Auto 100 Mbps, Half duplex\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) 		} else if (negotiate & ADVERTISE_10FULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) 			np->speed = 10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) 			np->full_duplex = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) 			printk (KERN_INFO "Auto 10 Mbps, Full duplex\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) 		} else if (negotiate & ADVERTISE_10HALF) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) 			np->speed = 10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) 			np->full_duplex = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) 			printk (KERN_INFO "Auto 10 Mbps, Half duplex\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) 		if (negotiate & ADVERTISE_PAUSE_CAP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) 			np->tx_flow &= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) 			np->rx_flow &= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) 		} else if (negotiate & ADVERTISE_PAUSE_ASYM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) 			np->tx_flow = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) 			np->rx_flow &= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) 		/* else tx_flow, rx_flow = user select  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) 		__u16 bmcr = mii_read (dev, phy_addr, MII_BMCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) 		switch (bmcr & (BMCR_SPEED100 | BMCR_SPEED1000)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) 		case BMCR_SPEED1000:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) 			printk (KERN_INFO "Operating at 1000 Mbps, ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) 		case BMCR_SPEED100:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) 			printk (KERN_INFO "Operating at 100 Mbps, ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) 		case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) 			printk (KERN_INFO "Operating at 10 Mbps, ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) 		if (bmcr & BMCR_FULLDPLX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) 			printk (KERN_CONT "Full duplex\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) 			printk (KERN_CONT "Half duplex\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) 	if (np->tx_flow)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) 		printk(KERN_INFO "Enable Tx Flow Control\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) 		printk(KERN_INFO "Disable Tx Flow Control\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) 	if (np->rx_flow)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) 		printk(KERN_INFO "Enable Rx Flow Control\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) 		printk(KERN_INFO "Disable Rx Flow Control\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) mii_set_media (struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) 	__u16 pscr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) 	__u16 bmcr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) 	__u16 bmsr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) 	__u16 anar;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) 	int phy_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) 	struct netdev_private *np;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) 	np = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) 	phy_addr = np->phy_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) 	/* Does user set speed? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) 	if (np->an_enable) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) 		/* Advertise capabilities */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) 		bmsr = mii_read (dev, phy_addr, MII_BMSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) 		anar = mii_read (dev, phy_addr, MII_ADVERTISE) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) 			~(ADVERTISE_100FULL | ADVERTISE_10FULL |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) 			  ADVERTISE_100HALF | ADVERTISE_10HALF |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) 			  ADVERTISE_100BASE4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) 		if (bmsr & BMSR_100FULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) 			anar |= ADVERTISE_100FULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) 		if (bmsr & BMSR_100HALF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) 			anar |= ADVERTISE_100HALF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) 		if (bmsr & BMSR_100BASE4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) 			anar |= ADVERTISE_100BASE4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) 		if (bmsr & BMSR_10FULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) 			anar |= ADVERTISE_10FULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) 		if (bmsr & BMSR_10HALF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) 			anar |= ADVERTISE_10HALF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) 		anar |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) 		mii_write (dev, phy_addr, MII_ADVERTISE, anar);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) 		/* Enable Auto crossover */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) 		pscr = mii_read (dev, phy_addr, MII_PHY_SCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) 		pscr |= 3 << 5;	/* 11'b */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) 		mii_write (dev, phy_addr, MII_PHY_SCR, pscr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) 		/* Soft reset PHY */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) 		mii_write (dev, phy_addr, MII_BMCR, BMCR_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) 		bmcr = BMCR_ANENABLE | BMCR_ANRESTART | BMCR_RESET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) 		mii_write (dev, phy_addr, MII_BMCR, bmcr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) 		mdelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) 		/* Force speed setting */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) 		/* 1) Disable Auto crossover */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) 		pscr = mii_read (dev, phy_addr, MII_PHY_SCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) 		pscr &= ~(3 << 5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) 		mii_write (dev, phy_addr, MII_PHY_SCR, pscr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) 		/* 2) PHY Reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) 		bmcr = mii_read (dev, phy_addr, MII_BMCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) 		bmcr |= BMCR_RESET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) 		mii_write (dev, phy_addr, MII_BMCR, bmcr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) 		/* 3) Power Down */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) 		bmcr = 0x1940;	/* must be 0x1940 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) 		mii_write (dev, phy_addr, MII_BMCR, bmcr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) 		mdelay (100);	/* wait a certain time */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) 		/* 4) Advertise nothing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) 		mii_write (dev, phy_addr, MII_ADVERTISE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) 		/* 5) Set media and Power Up */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) 		bmcr = BMCR_PDOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) 		if (np->speed == 100) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) 			bmcr |= BMCR_SPEED100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) 			printk (KERN_INFO "Manual 100 Mbps, ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) 		} else if (np->speed == 10) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) 			printk (KERN_INFO "Manual 10 Mbps, ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) 		if (np->full_duplex) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) 			bmcr |= BMCR_FULLDPLX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) 			printk (KERN_CONT "Full duplex\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) 			printk (KERN_CONT "Half duplex\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) #if 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) 		/* Set 1000BaseT Master/Slave setting */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) 		mscr = mii_read (dev, phy_addr, MII_CTRL1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) 		mscr |= MII_MSCR_CFG_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) 		mscr &= ~MII_MSCR_CFG_VALUE = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) 		mii_write (dev, phy_addr, MII_BMCR, bmcr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) 		mdelay(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) mii_get_media_pcs (struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) 	__u16 negotiate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) 	__u16 bmsr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) 	int phy_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) 	struct netdev_private *np;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) 	np = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) 	phy_addr = np->phy_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) 	bmsr = mii_read (dev, phy_addr, PCS_BMSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) 	if (np->an_enable) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) 		if (!(bmsr & BMSR_ANEGCOMPLETE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) 			/* Auto-Negotiation not completed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) 			return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) 		negotiate = mii_read (dev, phy_addr, PCS_ANAR) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) 			mii_read (dev, phy_addr, PCS_ANLPAR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) 		np->speed = 1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) 		if (negotiate & PCS_ANAR_FULL_DUPLEX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) 			printk (KERN_INFO "Auto 1000 Mbps, Full duplex\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) 			np->full_duplex = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) 			printk (KERN_INFO "Auto 1000 Mbps, half duplex\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) 			np->full_duplex = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) 		if (negotiate & PCS_ANAR_PAUSE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) 			np->tx_flow &= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) 			np->rx_flow &= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) 		} else if (negotiate & PCS_ANAR_ASYMMETRIC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) 			np->tx_flow = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) 			np->rx_flow &= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) 		/* else tx_flow, rx_flow = user select  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) 		__u16 bmcr = mii_read (dev, phy_addr, PCS_BMCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) 		printk (KERN_INFO "Operating at 1000 Mbps, ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) 		if (bmcr & BMCR_FULLDPLX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) 			printk (KERN_CONT "Full duplex\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) 			printk (KERN_CONT "Half duplex\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) 	if (np->tx_flow)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) 		printk(KERN_INFO "Enable Tx Flow Control\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) 		printk(KERN_INFO "Disable Tx Flow Control\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) 	if (np->rx_flow)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) 		printk(KERN_INFO "Enable Rx Flow Control\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) 		printk(KERN_INFO "Disable Rx Flow Control\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) mii_set_media_pcs (struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) 	__u16 bmcr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) 	__u16 esr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) 	__u16 anar;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) 	int phy_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) 	struct netdev_private *np;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) 	np = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) 	phy_addr = np->phy_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) 	/* Auto-Negotiation? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) 	if (np->an_enable) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) 		/* Advertise capabilities */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) 		esr = mii_read (dev, phy_addr, PCS_ESR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) 		anar = mii_read (dev, phy_addr, MII_ADVERTISE) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) 			~PCS_ANAR_HALF_DUPLEX &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) 			~PCS_ANAR_FULL_DUPLEX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) 		if (esr & (MII_ESR_1000BT_HD | MII_ESR_1000BX_HD))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) 			anar |= PCS_ANAR_HALF_DUPLEX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) 		if (esr & (MII_ESR_1000BT_FD | MII_ESR_1000BX_FD))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) 			anar |= PCS_ANAR_FULL_DUPLEX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) 		anar |= PCS_ANAR_PAUSE | PCS_ANAR_ASYMMETRIC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) 		mii_write (dev, phy_addr, MII_ADVERTISE, anar);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) 		/* Soft reset PHY */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) 		mii_write (dev, phy_addr, MII_BMCR, BMCR_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) 		bmcr = BMCR_ANENABLE | BMCR_ANRESTART | BMCR_RESET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) 		mii_write (dev, phy_addr, MII_BMCR, bmcr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) 		mdelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) 		/* Force speed setting */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) 		/* PHY Reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) 		bmcr = BMCR_RESET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) 		mii_write (dev, phy_addr, MII_BMCR, bmcr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) 		mdelay(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) 		if (np->full_duplex) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) 			bmcr = BMCR_FULLDPLX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) 			printk (KERN_INFO "Manual full duplex\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) 			bmcr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) 			printk (KERN_INFO "Manual half duplex\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) 		mii_write (dev, phy_addr, MII_BMCR, bmcr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) 		mdelay(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) 		/*  Advertise nothing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) 		mii_write (dev, phy_addr, MII_ADVERTISE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) rio_close (struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) 	struct netdev_private *np = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) 	struct pci_dev *pdev = np->pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) 	netif_stop_queue (dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) 	rio_hw_stop(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) 	free_irq(pdev->irq, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) 	del_timer_sync (&np->timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) 	free_list(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) rio_remove1 (struct pci_dev *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) 	struct net_device *dev = pci_get_drvdata (pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) 	if (dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) 		struct netdev_private *np = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) 		unregister_netdev (dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) 		dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, np->rx_ring,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) 				  np->rx_ring_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) 		dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, np->tx_ring,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) 				  np->tx_ring_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) #ifdef MEM_MAPPING
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) 		pci_iounmap(pdev, np->ioaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) 		pci_iounmap(pdev, np->eeprom_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) 		free_netdev (dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) 		pci_release_regions (pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) 		pci_disable_device (pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) #ifdef CONFIG_PM_SLEEP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) static int rio_suspend(struct device *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) 	struct net_device *dev = dev_get_drvdata(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) 	struct netdev_private *np = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) 	if (!netif_running(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) 	netif_device_detach(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) 	del_timer_sync(&np->timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) 	rio_hw_stop(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) static int rio_resume(struct device *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) 	struct net_device *dev = dev_get_drvdata(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) 	struct netdev_private *np = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) 	if (!netif_running(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) 	rio_reset_ring(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) 	rio_hw_init(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) 	np->timer.expires = jiffies + 1 * HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) 	add_timer(&np->timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) 	netif_device_attach(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) 	dl2k_enable_int(np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) static SIMPLE_DEV_PM_OPS(rio_pm_ops, rio_suspend, rio_resume);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) #define RIO_PM_OPS    (&rio_pm_ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) #define RIO_PM_OPS	NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) #endif /* CONFIG_PM_SLEEP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) static struct pci_driver rio_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) 	.name		= "dl2k",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) 	.id_table	= rio_pci_tbl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) 	.probe		= rio_probe1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) 	.remove		= rio_remove1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) 	.driver.pm	= RIO_PM_OPS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) module_pci_driver(rio_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) /* Read Documentation/networking/device_drivers/ethernet/dlink/dl2k.rst. */