Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    1) // SPDX-License-Identifier: GPL-2.0-or-later
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    3)  * This code is derived from the VIA reference driver (copyright message
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    4)  * below) provided to Red Hat by VIA Networking Technologies, Inc. for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    5)  * addition to the Linux kernel.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    6)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    7)  * The code has been merged into one source file, cleaned up to follow
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    8)  * Linux coding style,  ported to the Linux 2.6 kernel tree and cleaned
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    9)  * for 64bit hardware platforms.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   10)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   11)  * TODO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   12)  *	rx_copybreak/alignment
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   13)  *	More testing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   14)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   15)  * The changes are (c) Copyright 2004, Red Hat Inc. <alan@lxorguk.ukuu.org.uk>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   16)  * Additional fixes and clean up: Francois Romieu
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   17)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   18)  * This source has not been verified for use in safety critical systems.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   19)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   20)  * Please direct queries about the revamped driver to the linux-kernel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   21)  * list not VIA.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   22)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   23)  * Original code:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   24)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   25)  * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   26)  * All rights reserved.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   27)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   28)  * Author: Chuang Liang-Shing, AJ Jiang
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   29)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   30)  * Date: Jan 24, 2003
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   31)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   32)  * MODULE_LICENSE("GPL");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   33)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   34) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   35) #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   36) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   37) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   38) #include <linux/types.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   39) #include <linux/bitops.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   40) #include <linux/init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   41) #include <linux/dma-mapping.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   42) #include <linux/mm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   43) #include <linux/errno.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   44) #include <linux/ioport.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   45) #include <linux/pci.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   46) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   47) #include <linux/netdevice.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   48) #include <linux/etherdevice.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   49) #include <linux/skbuff.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   50) #include <linux/delay.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   51) #include <linux/timer.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   52) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   53) #include <linux/interrupt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   54) #include <linux/string.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   55) #include <linux/wait.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   56) #include <linux/io.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   57) #include <linux/if.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   58) #include <linux/uaccess.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   59) #include <linux/proc_fs.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   60) #include <linux/of_address.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   61) #include <linux/of_device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   62) #include <linux/of_irq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   63) #include <linux/inetdevice.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   64) #include <linux/platform_device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   65) #include <linux/reboot.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   66) #include <linux/ethtool.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   67) #include <linux/mii.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   68) #include <linux/in.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   69) #include <linux/if_arp.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   70) #include <linux/if_vlan.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   71) #include <linux/ip.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   72) #include <linux/tcp.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   73) #include <linux/udp.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   74) #include <linux/crc-ccitt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   75) #include <linux/crc32.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   76) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   77) #include "via-velocity.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   78) 
/* How the adapter is attached to the system: either as a PCI function or
 * as a memory-mapped platform (devicetree) device.
 */
enum velocity_bus_type {
	BUS_PCI,
	BUS_PLATFORM,
};

/* Count of velocity adapters probed so far — presumably used to index the
 * per-unit module option arrays below; verify against the probe path.
 */
static int velocity_nics;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   85) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   86) static void velocity_set_power_state(struct velocity_info *vptr, char state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   87) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   88) 	void *addr = vptr->mac_regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   89) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   90) 	if (vptr->pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   91) 		pci_set_power_state(vptr->pdev, state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   92) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   93) 		writeb(state, addr + 0x154);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   94) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   95) 
/**
 *	mac_get_cam_mask	-	Read a CAM mask
 *	@regs: register block for this velocity
 *	@mask: buffer to store the mask (8 bytes, read in ascending order)
 *
 *	Fetch the mask bits of the selected CAM and store them into the
 *	provided mask buffer. The CAM page is restored to MAR selection
 *	before returning.
 */
static void mac_get_cam_mask(struct mac_regs __iomem *regs, u8 *mask)
{
	int i;

	/* Select CAM mask */
	BYTE_REG_BITS_SET(CAMCR_PS_CAM_MASK, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);

	/* Address 0, CAMEN not set: read access to the mask page */
	writeb(0, &regs->CAMADDR);

	/* read mask */
	for (i = 0; i < 8; i++)
		*mask++ = readb(&(regs->MARCAM[i]));

	/* disable CAMEN */
	writeb(0, &regs->CAMADDR);

	/* Select mar */
	BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  123) 
/**
 *	mac_set_cam_mask	-	Set a CAM mask
 *	@regs: register block for this velocity
 *	@mask: CAM mask to load (8 bytes, written in ascending order)
 *
 *	Store a new mask into a CAM. CAMEN is raised for the duration of
 *	the write and cleared afterwards; the CAM page is restored to MAR
 *	selection before returning.
 */
static void mac_set_cam_mask(struct mac_regs __iomem *regs, u8 *mask)
{
	int i;
	/* Select CAM mask */
	BYTE_REG_BITS_SET(CAMCR_PS_CAM_MASK, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);

	/* Enable CAM access for the mask write */
	writeb(CAMADDR_CAMEN, &regs->CAMADDR);

	for (i = 0; i < 8; i++)
		writeb(*mask++, &(regs->MARCAM[i]));

	/* disable CAMEN */
	writeb(0, &regs->CAMADDR);

	/* Select mar */
	BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  148) 
/**
 *	mac_set_vlan_cam_mask	-	Set the VLAN CAM mask
 *	@regs: register block for this velocity
 *	@mask: CAM mask to load (8 bytes, written in ascending order)
 *
 *	Same sequence as mac_set_cam_mask() but with CAMADDR_VCAMSL set so
 *	the write targets the VLAN CAM rather than the address CAM.
 */
static void mac_set_vlan_cam_mask(struct mac_regs __iomem *regs, u8 *mask)
{
	int i;
	/* Select CAM mask */
	BYTE_REG_BITS_SET(CAMCR_PS_CAM_MASK, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);

	/* Enable CAM access and select the VLAN CAM */
	writeb(CAMADDR_CAMEN | CAMADDR_VCAMSL, &regs->CAMADDR);

	for (i = 0; i < 8; i++)
		writeb(*mask++, &(regs->MARCAM[i]));

	/* disable CAMEN */
	writeb(0, &regs->CAMADDR);

	/* Select mar */
	BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  166) 
/**
 *	mac_set_cam	-	set CAM data
 *	@regs: register block of this velocity
 *	@idx: Cam index (masked to 0..63)
 *	@addr: 6 bytes of CAM data (an Ethernet address)
 *
 *	Load an address into a CAM. The entry is latched by pulsing
 *	CAMCR_CAMWR; the 10us delay gives the hardware time to commit
 *	the write before CAMEN is dropped.
 */
static void mac_set_cam(struct mac_regs __iomem *regs, int idx, const u8 *addr)
{
	int i;

	/* Select CAM mask */
	BYTE_REG_BITS_SET(CAMCR_PS_CAM_DATA, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);

	/* The CAM has 64 entries; clamp the index into range */
	idx &= (64 - 1);

	writeb(CAMADDR_CAMEN | idx, &regs->CAMADDR);

	for (i = 0; i < 6; i++)
		writeb(*addr++, &(regs->MARCAM[i]));

	/* Trigger the CAM write and let the hardware latch it */
	BYTE_REG_BITS_ON(CAMCR_CAMWR, &regs->CAMCR);

	udelay(10);

	writeb(0, &regs->CAMADDR);

	/* Select mar */
	BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  198) 
/**
 *	mac_set_vlan_cam	-	set a VLAN CAM entry
 *	@regs: register block of this velocity
 *	@idx: Cam index (masked to 0..63)
 *	@addr: 2 bytes of CAM data (a VLAN tag)
 *
 *	Load a VLAN tag into a VLAN CAM entry, mirroring mac_set_cam()
 *	but selecting the VLAN CAM and writing a single 16-bit value.
 */
static void mac_set_vlan_cam(struct mac_regs __iomem *regs, int idx,
			     const u8 *addr)
{

	/* Select CAM mask */
	BYTE_REG_BITS_SET(CAMCR_PS_CAM_DATA, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);

	/* The CAM has 64 entries; clamp the index into range */
	idx &= (64 - 1);

	writeb(CAMADDR_CAMEN | CAMADDR_VCAMSL | idx, &regs->CAMADDR);
	/* NOTE(review): assumes @addr is 2-byte aligned and holds the tag in
	 * the byte order the hardware expects — confirm against callers.
	 */
	writew(*((u16 *) addr), &regs->MARCAM[0]);

	/* Trigger the CAM write and let the hardware latch it */
	BYTE_REG_BITS_ON(CAMCR_CAMWR, &regs->CAMCR);

	udelay(10);

	writeb(0, &regs->CAMADDR);

	/* Select mar */
	BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  220) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  221) 
/**
 *	mac_wol_reset	-	reset WOL after exiting low power
 *	@regs: register block of this velocity
 *
 *	Called after we drop out of wake on lan mode in order to
 *	reset the Wake on lan features. This function doesn't restore
 *	the rest of the logic from the result of sleep/wakeup
 */
static void mac_wol_reset(struct mac_regs __iomem *regs)
{

	/* Turn off SWPTAG right after leaving power mode */
	BYTE_REG_BITS_OFF(STICKHW_SWPTAG, &regs->STICKHW);
	/* clear sticky bits */
	BYTE_REG_BITS_OFF((STICKHW_DS1 | STICKHW_DS0), &regs->STICKHW);

	/* Drop forced GMII mode and forced-mode select */
	BYTE_REG_BITS_OFF(CHIPGCR_FCGMII, &regs->CHIPGCR);
	BYTE_REG_BITS_OFF(CHIPGCR_FCMODE, &regs->CHIPGCR);
	/* disable force PME-enable */
	writeb(WOLCFG_PMEOVR, &regs->WOLCFGClr);
	/* disable power-event config bit (clear every WOL trigger) */
	writew(0xFFFF, &regs->WOLCRClr);
	/* clear power status */
	writew(0xFFFF, &regs->WOLSRClr);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  247) 
static const struct ethtool_ops velocity_ethtool_ops;

/*
    Module options. Each option is a per-unit integer array declared via
    the VELOCITY_PARAM() helper below.
*/

MODULE_AUTHOR("VIA Networking Technologies, Inc.");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("VIA Networking Velocity Family Gigabit Ethernet Adapter Driver");

/* Declare a per-unit int module parameter @N (one slot per adapter),
 * defaulting every slot to OPTION_DEFAULT, with description @D.
 */
#define VELOCITY_PARAM(N, D) \
	static int N[MAX_UNITS] = OPTION_DEFAULT;\
	module_param_array(N, int, NULL, 0); \
	MODULE_PARM_DESC(N, D);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  262) 
#define RX_DESC_MIN     64
#define RX_DESC_MAX     255
#define RX_DESC_DEF     64
VELOCITY_PARAM(RxDescriptors, "Number of receive descriptors");

#define TX_DESC_MIN     16
#define TX_DESC_MAX     256
#define TX_DESC_DEF     64
VELOCITY_PARAM(TxDescriptors, "Number of transmit descriptors");

#define RX_THRESH_MIN   0
#define RX_THRESH_MAX   3
#define RX_THRESH_DEF   0
/* rx_thresh[] is used for controlling the receive fifo threshold.
   0: indicates the rxfifo threshold is 128 bytes.
   1: indicates the rxfifo threshold is 512 bytes.
   2: indicates the rxfifo threshold is 1024 bytes.
   3: indicates the rxfifo threshold is store & forward.
*/
VELOCITY_PARAM(rx_thresh, "Receive fifo threshold");

#define DMA_LENGTH_MIN  0
#define DMA_LENGTH_MAX  7
#define DMA_LENGTH_DEF  6

/* DMA_length[] is used for controlling the DMA length
   0: 8 DWORDs
   1: 16 DWORDs
   2: 32 DWORDs
   3: 64 DWORDs
   4: 128 DWORDs
   5: 256 DWORDs
   6: SF (flush till empty)
   7: SF (flush till empty)
*/
VELOCITY_PARAM(DMA_length, "DMA length");

#define IP_ALIG_DEF     0
/* IP_byte_align[] is used for IP header DWORD byte alignment
   0: indicates the IP header won't be DWORD byte aligned. (Default)
   1: indicates the IP header will be DWORD byte aligned.
      In some environments the IP header must be DWORD byte aligned,
      or the packet will be dropped when we receive it. (eg: IPVS)
*/
VELOCITY_PARAM(IP_byte_align, "Enable IP header dword aligned");

#define FLOW_CNTL_DEF   1
#define FLOW_CNTL_MIN   1
#define FLOW_CNTL_MAX   5

/* flow_control[] is used for setting the flow control ability of NIC.
   1: hardware default - AUTO (default). Use hardware default value in ANAR.
   2: enable TX flow control.
   3: enable RX flow control.
   4: enable RX/TX flow control.
   5: disable
*/
VELOCITY_PARAM(flow_control, "Enable flow control ability");

#define MED_LNK_DEF 0
#define MED_LNK_MIN 0
#define MED_LNK_MAX 5
/* speed_duplex[] is used for setting the speed and duplex mode of NIC.
   0: indicates autonegotiation for both speed and duplex mode
   1: indicates 100Mbps half duplex mode
   2: indicates 100Mbps full duplex mode
   3: indicates 10Mbps half duplex mode
   4: indicates 10Mbps full duplex mode
   5: indicates 1000Mbps full duplex mode

   Note:
   if the EEPROM has been set to a forced mode, this option is ignored
   by the driver.
*/
VELOCITY_PARAM(speed_duplex, "Setting the speed and duplex mode");

#define WOL_OPT_DEF     0
#define WOL_OPT_MIN     0
#define WOL_OPT_MAX     7
/* wol_opts[] is used for controlling wake on lan behavior.
   0: Wake up if received a magic packet. (Default)
   1: Wake up if link status is on/off.
   2: Wake up if received an arp packet.
   4: Wake up if received any unicast packet.
   These values can be summed to enable more than one option.
*/
VELOCITY_PARAM(wol_opts, "Wake On Lan options");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  350) 
/* Size threshold (bytes) for the copy-only-tiny-frames receive path
 * (see MODULE_PARM_DESC below); runtime-writable via sysfs (mode 0644).
 */
static int rx_copybreak = 200;
module_param(rx_copybreak, int, 0644);
MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  354) 
/*
 *	Internal board variants. At the moment we have only one.
 *	The table is NULL-name terminated (see get_chip_name()).
 */
static struct velocity_info_tbl chip_info_table[] = {
	{CHIP_TYPE_VT6110, "VIA Networking Velocity Family Gigabit Ethernet Adapter", 1, 0x00FFFFFFUL},
	{ }
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  362) 
/*
 *	Describe the PCI device identifiers that we support in this
 *	device driver. Used for hotplug autoloading.
 */

static const struct pci_device_id velocity_pci_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_612X) },
	{ }	/* terminating entry */
};

MODULE_DEVICE_TABLE(pci, velocity_pci_id_table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  374) 
/*
 *	Describe the OF device identifiers that we support in this
 *	device driver. Used for devicetree nodes; .data points at the
 *	matching chip_info_table entry.
 */
static const struct of_device_id velocity_of_ids[] = {
	{ .compatible = "via,velocity-vt6110", .data = &chip_info_table[0] },
	{ /* Sentinel */ },
};
MODULE_DEVICE_TABLE(of, velocity_of_ids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  384) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  385) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  386)  *	get_chip_name	- 	identifier to name
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  387)  *	@chip_id: chip identifier
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  388)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  389)  *	Given a chip identifier return a suitable description. Returns
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  390)  *	a pointer a static string valid while the driver is loaded.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  391)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  392) static const char *get_chip_name(enum chip_type chip_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  393) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  394) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  395) 	for (i = 0; chip_info_table[i].name != NULL; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  396) 		if (chip_info_table[i].chip_id == chip_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  397) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  398) 	return chip_info_table[i].name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  399) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  400) 
/**
 *	velocity_set_int_opt	-	parser for integer options
 *	@opt: pointer to option value
 *	@val: value the user requested (or -1 for default)
 *	@min: lowest value allowed
 *	@max: highest value allowed
 *	@def: default value
 *	@name: property name
 *
 *	Set an integer property in the module options. This function does
 *	all the verification and checking as well as reporting so that
 *	we don't duplicate code for each option.
 */
static void velocity_set_int_opt(int *opt, int val, int min, int max, int def,
				 char *name)
{
	/* -1 means "not set on the command line": take the default silently */
	if (val == -1) {
		*opt = def;
		return;
	}

	/* Out-of-range values fall back to the default with a warning */
	if (val < min || val > max) {
		pr_notice("the value of parameter %s is invalid, the valid range is (%d-%d)\n",
			  name, min, max);
		*opt = def;
		return;
	}

	pr_info("set value of parameter %s to %d\n", name, val);
	*opt = val;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  428) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  429) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  430)  *	velocity_set_bool_opt	-	parser for boolean options
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  431)  *	@opt: pointer to option value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  432)  *	@val: value the user requested (or -1 for default)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  433)  *	@def: default value (yes/no)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  434)  *	@flag: numeric value to set for true.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  435)  *	@name: property name
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  436)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  437)  *	Set a boolean property in the module options. This function does
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  438)  *	all the verification and checking as well as reporting so that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  439)  *	we don't duplicate code for each option.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  440)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  441) static void velocity_set_bool_opt(u32 *opt, int val, int def, u32 flag,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  442) 				  char *name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  443) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  444) 	(*opt) &= (~flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  445) 	if (val == -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  446) 		*opt |= (def ? flag : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  447) 	else if (val < 0 || val > 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  448) 		pr_notice("the value of parameter %s is invalid, the valid range is (%d-%d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  449) 			  name, 0, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  450) 		*opt |= (def ? flag : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  451) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  452) 		pr_info("set parameter %s to %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  453) 			name, val ? "TRUE" : "FALSE");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  454) 		*opt |= (val ? flag : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  455) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  456) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  457) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  458) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  459)  *	velocity_get_options	-	set options on device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  460)  *	@opts: option structure for the device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  461)  *	@index: index of option to use in module options array
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  462)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  463)  *	Turn the module and command options into a single structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  464)  *	for the current device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  465)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  466) static void velocity_get_options(struct velocity_opt *opts, int index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  467) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  468) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  469) 	velocity_set_int_opt(&opts->rx_thresh, rx_thresh[index],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  470) 			     RX_THRESH_MIN, RX_THRESH_MAX, RX_THRESH_DEF,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  471) 			     "rx_thresh");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  472) 	velocity_set_int_opt(&opts->DMA_length, DMA_length[index],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  473) 			     DMA_LENGTH_MIN, DMA_LENGTH_MAX, DMA_LENGTH_DEF,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  474) 			     "DMA_length");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  475) 	velocity_set_int_opt(&opts->numrx, RxDescriptors[index],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  476) 			     RX_DESC_MIN, RX_DESC_MAX, RX_DESC_DEF,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  477) 			     "RxDescriptors");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  478) 	velocity_set_int_opt(&opts->numtx, TxDescriptors[index],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  479) 			     TX_DESC_MIN, TX_DESC_MAX, TX_DESC_DEF,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  480) 			     "TxDescriptors");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  481) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  482) 	velocity_set_int_opt(&opts->flow_cntl, flow_control[index],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  483) 			     FLOW_CNTL_MIN, FLOW_CNTL_MAX, FLOW_CNTL_DEF,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  484) 			     "flow_control");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  485) 	velocity_set_bool_opt(&opts->flags, IP_byte_align[index],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  486) 			      IP_ALIG_DEF, VELOCITY_FLAGS_IP_ALIGN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  487) 			      "IP_byte_align");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  488) 	velocity_set_int_opt((int *) &opts->spd_dpx, speed_duplex[index],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  489) 			     MED_LNK_MIN, MED_LNK_MAX, MED_LNK_DEF,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  490) 			     "Media link mode");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  491) 	velocity_set_int_opt(&opts->wol_opts, wol_opts[index],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  492) 			     WOL_OPT_MIN, WOL_OPT_MAX, WOL_OPT_DEF,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  493) 			     "Wake On Lan options");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  494) 	opts->numrx = (opts->numrx & ~3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  495) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  496) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  497) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  498)  *	velocity_init_cam_filter	-	initialise CAM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  499)  *	@vptr: velocity to program
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  500)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  501)  *	Initialize the content addressable memory used for filters. Load
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  502)  *	appropriately according to the presence of VLAN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  503)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  504) static void velocity_init_cam_filter(struct velocity_info *vptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  505) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  506) 	struct mac_regs __iomem *regs = vptr->mac_regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  507) 	unsigned int vid, i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  508) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  509) 	/* Turn on MCFG_PQEN, turn off MCFG_RTGOPT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  510) 	WORD_REG_BITS_SET(MCFG_PQEN, MCFG_RTGOPT, &regs->MCFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  511) 	WORD_REG_BITS_ON(MCFG_VIDFR, &regs->MCFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  512) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  513) 	/* Disable all CAMs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  514) 	memset(vptr->vCAMmask, 0, sizeof(u8) * 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  515) 	memset(vptr->mCAMmask, 0, sizeof(u8) * 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  516) 	mac_set_vlan_cam_mask(regs, vptr->vCAMmask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  517) 	mac_set_cam_mask(regs, vptr->mCAMmask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  518) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  519) 	/* Enable VCAMs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  520) 	for_each_set_bit(vid, vptr->active_vlans, VLAN_N_VID) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  521) 		mac_set_vlan_cam(regs, i, (u8 *) &vid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  522) 		vptr->vCAMmask[i / 8] |= 0x1 << (i % 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  523) 		if (++i >= VCAM_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  524) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  525) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  526) 	mac_set_vlan_cam_mask(regs, vptr->vCAMmask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  527) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  528) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  529) static int velocity_vlan_rx_add_vid(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  530) 				    __be16 proto, u16 vid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  531) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  532) 	struct velocity_info *vptr = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  533) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  534) 	spin_lock_irq(&vptr->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  535) 	set_bit(vid, vptr->active_vlans);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  536) 	velocity_init_cam_filter(vptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  537) 	spin_unlock_irq(&vptr->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  538) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  539) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  540) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  541) static int velocity_vlan_rx_kill_vid(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  542) 				     __be16 proto, u16 vid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  543) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  544) 	struct velocity_info *vptr = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  545) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  546) 	spin_lock_irq(&vptr->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  547) 	clear_bit(vid, vptr->active_vlans);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  548) 	velocity_init_cam_filter(vptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  549) 	spin_unlock_irq(&vptr->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  550) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  551) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  552) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  553) static void velocity_init_rx_ring_indexes(struct velocity_info *vptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  554) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  555) 	vptr->rx.dirty = vptr->rx.filled = vptr->rx.curr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  556) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  557) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  558) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  559)  *	velocity_rx_reset	-	handle a receive reset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  560)  *	@vptr: velocity we are resetting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  561)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  562)  *	Reset the ownership and status for the receive ring side.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  563)  *	Hand all the receive queue to the NIC.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  564)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  565) static void velocity_rx_reset(struct velocity_info *vptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  566) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  567) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  568) 	struct mac_regs __iomem *regs = vptr->mac_regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  569) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  570) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  571) 	velocity_init_rx_ring_indexes(vptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  572) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  573) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  574) 	 *	Init state, all RD entries belong to the NIC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  575) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  576) 	for (i = 0; i < vptr->options.numrx; ++i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  577) 		vptr->rx.ring[i].rdesc0.len |= OWNED_BY_NIC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  578) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  579) 	writew(vptr->options.numrx, &regs->RBRDU);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  580) 	writel(vptr->rx.pool_dma, &regs->RDBaseLo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  581) 	writew(0, &regs->RDIdx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  582) 	writew(vptr->options.numrx - 1, &regs->RDCSize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  583) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  584) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  585) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  586)  *	velocity_get_opt_media_mode	-	get media selection
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  587)  *	@vptr: velocity adapter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  588)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  589)  *	Get the media mode stored in EEPROM or module options and load
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  590)  *	mii_status accordingly. The requested link state information
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  591)  *	is also returned.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  592)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  593) static u32 velocity_get_opt_media_mode(struct velocity_info *vptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  594) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  595) 	u32 status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  596) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  597) 	switch (vptr->options.spd_dpx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  598) 	case SPD_DPX_AUTO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  599) 		status = VELOCITY_AUTONEG_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  600) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  601) 	case SPD_DPX_100_FULL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  602) 		status = VELOCITY_SPEED_100 | VELOCITY_DUPLEX_FULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  603) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  604) 	case SPD_DPX_10_FULL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  605) 		status = VELOCITY_SPEED_10 | VELOCITY_DUPLEX_FULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  606) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  607) 	case SPD_DPX_100_HALF:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  608) 		status = VELOCITY_SPEED_100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  609) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  610) 	case SPD_DPX_10_HALF:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  611) 		status = VELOCITY_SPEED_10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  612) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  613) 	case SPD_DPX_1000_FULL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  614) 		status = VELOCITY_SPEED_1000 | VELOCITY_DUPLEX_FULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  615) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  616) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  617) 	vptr->mii_status = status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  618) 	return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  619) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  620) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  621) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  622)  *	safe_disable_mii_autopoll	-	autopoll off
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  623)  *	@regs: velocity registers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  624)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  625)  *	Turn off the autopoll and wait for it to disable on the chip
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  626)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  627) static void safe_disable_mii_autopoll(struct mac_regs __iomem *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  628) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  629) 	u16 ww;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  630) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  631) 	/*  turn off MAUTO */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  632) 	writeb(0, &regs->MIICR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  633) 	for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  634) 		udelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  635) 		if (BYTE_REG_BITS_IS_ON(MIISR_MIDLE, &regs->MIISR))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  636) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  637) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  638) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  639) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  640) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  641)  *	enable_mii_autopoll	-	turn on autopolling
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  642)  *	@regs: velocity registers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  643)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  644)  *	Enable the MII link status autopoll feature on the Velocity
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  645)  *	hardware. Wait for it to enable.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  646)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  647) static void enable_mii_autopoll(struct mac_regs __iomem *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  648) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  649) 	int ii;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  650) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  651) 	writeb(0, &(regs->MIICR));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  652) 	writeb(MIIADR_SWMPL, &regs->MIIADR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  653) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  654) 	for (ii = 0; ii < W_MAX_TIMEOUT; ii++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  655) 		udelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  656) 		if (BYTE_REG_BITS_IS_ON(MIISR_MIDLE, &regs->MIISR))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  657) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  658) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  659) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  660) 	writeb(MIICR_MAUTO, &regs->MIICR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  661) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  662) 	for (ii = 0; ii < W_MAX_TIMEOUT; ii++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  663) 		udelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  664) 		if (!BYTE_REG_BITS_IS_ON(MIISR_MIDLE, &regs->MIISR))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  665) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  666) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  667) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  668) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  669) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  670) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  671)  *	velocity_mii_read	-	read MII data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  672)  *	@regs: velocity registers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  673)  *	@index: MII register index
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  674)  *	@data: buffer for received data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  675)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  676)  *	Perform a single read of an MII 16bit register. Returns zero
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  677)  *	on success or -ETIMEDOUT if the PHY did not respond.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  678)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  679) static int velocity_mii_read(struct mac_regs __iomem *regs, u8 index, u16 *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  680) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  681) 	u16 ww;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  682) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  683) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  684) 	 *	Disable MIICR_MAUTO, so that mii addr can be set normally
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  685) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  686) 	safe_disable_mii_autopoll(regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  687) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  688) 	writeb(index, &regs->MIIADR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  689) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  690) 	BYTE_REG_BITS_ON(MIICR_RCMD, &regs->MIICR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  691) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  692) 	for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  693) 		if (!(readb(&regs->MIICR) & MIICR_RCMD))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  694) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  695) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  696) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  697) 	*data = readw(&regs->MIIDATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  698) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  699) 	enable_mii_autopoll(regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  700) 	if (ww == W_MAX_TIMEOUT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  701) 		return -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  702) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  703) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  704) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  705) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  706)  *	mii_check_media_mode	-	check media state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  707)  *	@regs: velocity registers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  708)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709)  *	Check the current MII status and determine the link status
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  710)  *	accordingly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712) static u32 mii_check_media_mode(struct mac_regs __iomem *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714) 	u32 status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715) 	u16 ANAR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) 	if (!MII_REG_BITS_IS_ON(BMSR_LSTATUS, MII_BMSR, regs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) 		status |= VELOCITY_LINK_FAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720) 	if (MII_REG_BITS_IS_ON(ADVERTISE_1000FULL, MII_CTRL1000, regs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) 		status |= VELOCITY_SPEED_1000 | VELOCITY_DUPLEX_FULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) 	else if (MII_REG_BITS_IS_ON(ADVERTISE_1000HALF, MII_CTRL1000, regs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) 		status |= (VELOCITY_SPEED_1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) 	else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) 		velocity_mii_read(regs, MII_ADVERTISE, &ANAR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) 		if (ANAR & ADVERTISE_100FULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) 			status |= (VELOCITY_SPEED_100 | VELOCITY_DUPLEX_FULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) 		else if (ANAR & ADVERTISE_100HALF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 			status |= VELOCITY_SPEED_100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) 		else if (ANAR & ADVERTISE_10FULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) 			status |= (VELOCITY_SPEED_10 | VELOCITY_DUPLEX_FULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) 			status |= (VELOCITY_SPEED_10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 	if (MII_REG_BITS_IS_ON(BMCR_ANENABLE, MII_BMCR, regs)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 		velocity_mii_read(regs, MII_ADVERTISE, &ANAR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) 		if ((ANAR & (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 		    == (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) 			if (MII_REG_BITS_IS_ON(ADVERTISE_1000HALF | ADVERTISE_1000FULL, MII_CTRL1000, regs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) 				status |= VELOCITY_AUTONEG_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 	return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749)  *	velocity_mii_write	-	write MII data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750)  *	@regs: velocity registers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751)  *	@mii_addr: MII register index
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752)  *	@data: 16bit data for the MII register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754)  *	Perform a single write to an MII 16bit register. Returns zero
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755)  *	on success or -ETIMEDOUT if the PHY did not respond.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) static int velocity_mii_write(struct mac_regs __iomem *regs, u8 mii_addr, u16 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 	u16 ww;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 	 *	Disable MIICR_MAUTO, so that mii addr can be set normally
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 	safe_disable_mii_autopoll(regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 	/* MII reg offset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 	writeb(mii_addr, &regs->MIIADR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 	/* set MII data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 	writew(data, &regs->MIIDATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 	/* turn on MIICR_WCMD */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 	BYTE_REG_BITS_ON(MIICR_WCMD, &regs->MIICR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 	/* W_MAX_TIMEOUT is the timeout period */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 	for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 		udelay(5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 		if (!(readb(&regs->MIICR) & MIICR_WCMD))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 	enable_mii_autopoll(regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 	if (ww == W_MAX_TIMEOUT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 		return -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788)  *	set_mii_flow_control	-	flow control setup
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789)  *	@vptr: velocity interface
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791)  *	Set up the flow control on this interface according to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792)  *	the supplied user/eeprom options.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) static void set_mii_flow_control(struct velocity_info *vptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 	/*Enable or Disable PAUSE in ANAR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 	switch (vptr->options.flow_cntl) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 	case FLOW_CNTL_TX:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 		MII_REG_BITS_OFF(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 		MII_REG_BITS_ON(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 	case FLOW_CNTL_RX:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 		MII_REG_BITS_ON(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 		MII_REG_BITS_ON(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 	case FLOW_CNTL_TX_RX:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 		MII_REG_BITS_ON(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 		MII_REG_BITS_OFF(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 	case FLOW_CNTL_DISABLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 		MII_REG_BITS_OFF(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 		MII_REG_BITS_OFF(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823)  *	mii_set_auto_on		-	autonegotiate on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824)  *	@vptr: velocity
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826)  *	Enable autonegotation on this interface
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) static void mii_set_auto_on(struct velocity_info *vptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 	if (MII_REG_BITS_IS_ON(BMCR_ANENABLE, MII_BMCR, vptr->mac_regs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 		MII_REG_BITS_ON(BMCR_ANRESTART, MII_BMCR, vptr->mac_regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 		MII_REG_BITS_ON(BMCR_ANENABLE, MII_BMCR, vptr->mac_regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) static u32 check_connection_type(struct mac_regs __iomem *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 	u32 status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 	u8 PHYSR0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 	u16 ANAR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 	PHYSR0 = readb(&regs->PHYSR0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 	   if (!(PHYSR0 & PHYSR0_LINKGD))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 	   status|=VELOCITY_LINK_FAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 	if (PHYSR0 & PHYSR0_FDPX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 		status |= VELOCITY_DUPLEX_FULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 	if (PHYSR0 & PHYSR0_SPDG)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 		status |= VELOCITY_SPEED_1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 	else if (PHYSR0 & PHYSR0_SPD10)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 		status |= VELOCITY_SPEED_10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 		status |= VELOCITY_SPEED_100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 	if (MII_REG_BITS_IS_ON(BMCR_ANENABLE, MII_BMCR, regs)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 		velocity_mii_read(regs, MII_ADVERTISE, &ANAR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 		if ((ANAR & (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 		    == (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 			if (MII_REG_BITS_IS_ON(ADVERTISE_1000HALF | ADVERTISE_1000FULL, MII_CTRL1000, regs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 				status |= VELOCITY_AUTONEG_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 	return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871)  *	velocity_set_media_mode		-	set media mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872)  *	@vptr: velocity adapter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873)  *	@mii_status: old MII link state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875)  *	Check the media link state and configure the flow control
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876)  *	PHY and also velocity hardware setup accordingly. In particular
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877)  *	we need to set up CD polling and frame bursting.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 	struct mac_regs __iomem *regs = vptr->mac_regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 	vptr->mii_status = mii_check_media_mode(vptr->mac_regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 	/* Set mii link status */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 	set_mii_flow_control(vptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 	if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 		MII_REG_BITS_ON(AUXCR_MDPPS, MII_NCONFIG, vptr->mac_regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 	 *	If connection type is AUTO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 	if (mii_status & VELOCITY_AUTONEG_ENABLE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 		netdev_info(vptr->netdev, "Velocity is in AUTO mode\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 		/* clear force MAC mode bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 		BYTE_REG_BITS_OFF(CHIPGCR_FCMODE, &regs->CHIPGCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 		/* set duplex mode of MAC according to duplex mode of MII */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 		MII_REG_BITS_ON(ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF, MII_ADVERTISE, vptr->mac_regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 		MII_REG_BITS_ON(ADVERTISE_1000FULL | ADVERTISE_1000HALF, MII_CTRL1000, vptr->mac_regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 		MII_REG_BITS_ON(BMCR_SPEED1000, MII_BMCR, vptr->mac_regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 		/* enable AUTO-NEGO mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 		mii_set_auto_on(vptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 		u16 CTRL1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 		u16 ANAR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 		u8 CHIPGCR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 		 * 1. if it's 3119, disable frame bursting in halfduplex mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 		 *    and enable it in fullduplex mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 		 * 2. set correct MII/GMII and half/full duplex mode in CHIPGCR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 		 * 3. only enable CD heart beat counter in 10HD mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 		/* set force MAC mode bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 		BYTE_REG_BITS_ON(CHIPGCR_FCMODE, &regs->CHIPGCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 		CHIPGCR = readb(&regs->CHIPGCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 		if (mii_status & VELOCITY_SPEED_1000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 			CHIPGCR |= CHIPGCR_FCGMII;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 			CHIPGCR &= ~CHIPGCR_FCGMII;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 		if (mii_status & VELOCITY_DUPLEX_FULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 			CHIPGCR |= CHIPGCR_FCFDX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 			writeb(CHIPGCR, &regs->CHIPGCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 			netdev_info(vptr->netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 				    "set Velocity to forced full mode\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 			if (vptr->rev_id < REV_ID_VT3216_A0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 				BYTE_REG_BITS_OFF(TCR_TB2BDIS, &regs->TCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 			CHIPGCR &= ~CHIPGCR_FCFDX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 			netdev_info(vptr->netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 				    "set Velocity to forced half mode\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 			writeb(CHIPGCR, &regs->CHIPGCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 			if (vptr->rev_id < REV_ID_VT3216_A0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 				BYTE_REG_BITS_ON(TCR_TB2BDIS, &regs->TCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 		velocity_mii_read(vptr->mac_regs, MII_CTRL1000, &CTRL1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 		CTRL1000 &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 		if ((mii_status & VELOCITY_SPEED_1000) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 		    (mii_status & VELOCITY_DUPLEX_FULL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 			CTRL1000 |= ADVERTISE_1000FULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 		velocity_mii_write(vptr->mac_regs, MII_CTRL1000, CTRL1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 		if (!(mii_status & VELOCITY_DUPLEX_FULL) && (mii_status & VELOCITY_SPEED_10))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 			BYTE_REG_BITS_OFF(TESTCFG_HBDIS, &regs->TESTCFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 			BYTE_REG_BITS_ON(TESTCFG_HBDIS, &regs->TESTCFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 		/* MII_REG_BITS_OFF(BMCR_SPEED1000, MII_BMCR, vptr->mac_regs); */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 		velocity_mii_read(vptr->mac_regs, MII_ADVERTISE, &ANAR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 		ANAR &= (~(ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 		if (mii_status & VELOCITY_SPEED_100) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 			if (mii_status & VELOCITY_DUPLEX_FULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 				ANAR |= ADVERTISE_100FULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 				ANAR |= ADVERTISE_100HALF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 		} else if (mii_status & VELOCITY_SPEED_10) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 			if (mii_status & VELOCITY_DUPLEX_FULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 				ANAR |= ADVERTISE_10FULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 				ANAR |= ADVERTISE_10HALF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 		velocity_mii_write(vptr->mac_regs, MII_ADVERTISE, ANAR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 		/* enable AUTO-NEGO mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 		mii_set_auto_on(vptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 		/* MII_REG_BITS_ON(BMCR_ANENABLE, MII_BMCR, vptr->mac_regs); */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 	/* vptr->mii_status=mii_check_media_mode(vptr->mac_regs); */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 	/* vptr->mii_status=check_connection_type(vptr->mac_regs); */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 	return VELOCITY_LINK_CHANGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981)  *	velocity_print_link_status	-	link status reporting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982)  *	@vptr: velocity to report on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984)  *	Turn the link status of the velocity card into a kernel log
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985)  *	description of the new link state, detailing speed and duplex
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986)  *	status
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) static void velocity_print_link_status(struct velocity_info *vptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 	const char *link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 	const char *speed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 	const char *duplex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 	if (vptr->mii_status & VELOCITY_LINK_FAIL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 		netdev_notice(vptr->netdev, "failed to detect cable link\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 	if (vptr->options.spd_dpx == SPD_DPX_AUTO) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 		link = "auto-negotiation";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 		if (vptr->mii_status & VELOCITY_SPEED_1000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 			speed = "1000";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 		else if (vptr->mii_status & VELOCITY_SPEED_100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 			speed = "100";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 			speed = "10";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 		if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 			duplex = "full";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 			duplex = "half";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 		link = "forced";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 		switch (vptr->options.spd_dpx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 		case SPD_DPX_1000_FULL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 			speed = "1000";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 			duplex = "full";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 		case SPD_DPX_100_HALF:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 			speed = "100";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 			duplex = "half";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 		case SPD_DPX_100_FULL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 			speed = "100";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 			duplex = "full";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 		case SPD_DPX_10_HALF:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 			speed = "10";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 			duplex = "half";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 		case SPD_DPX_10_FULL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 			speed = "10";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 			duplex = "full";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 			speed = "unknown";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 			duplex = "unknown";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 	netdev_notice(vptr->netdev, "Link %s speed %sM bps %s duplex\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 		      link, speed, duplex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048)  *	enable_flow_control_ability	-	flow control
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049)  *	@vptr: veloity to configure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051)  *	Set up flow control according to the flow control options
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052)  *	determined by the eeprom/configuration.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) static void enable_flow_control_ability(struct velocity_info *vptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 	struct mac_regs __iomem *regs = vptr->mac_regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 	switch (vptr->options.flow_cntl) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 	case FLOW_CNTL_DEFAULT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 		if (BYTE_REG_BITS_IS_ON(PHYSR0_RXFLC, &regs->PHYSR0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 			writel(CR0_FDXRFCEN, &regs->CR0Set);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 			writel(CR0_FDXRFCEN, &regs->CR0Clr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 		if (BYTE_REG_BITS_IS_ON(PHYSR0_TXFLC, &regs->PHYSR0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 			writel(CR0_FDXTFCEN, &regs->CR0Set);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 			writel(CR0_FDXTFCEN, &regs->CR0Clr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 	case FLOW_CNTL_TX:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 		writel(CR0_FDXTFCEN, &regs->CR0Set);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 		writel(CR0_FDXRFCEN, &regs->CR0Clr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 	case FLOW_CNTL_RX:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 		writel(CR0_FDXRFCEN, &regs->CR0Set);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 		writel(CR0_FDXTFCEN, &regs->CR0Clr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 	case FLOW_CNTL_TX_RX:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 		writel(CR0_FDXTFCEN, &regs->CR0Set);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 		writel(CR0_FDXRFCEN, &regs->CR0Set);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 	case FLOW_CNTL_DISABLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 		writel(CR0_FDXRFCEN, &regs->CR0Clr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 		writel(CR0_FDXTFCEN, &regs->CR0Clr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100)  *	velocity_soft_reset	-	soft reset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101)  *	@vptr: velocity to reset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103)  *	Kick off a soft reset of the velocity adapter and then poll
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104)  *	until the reset sequence has completed before returning.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) static int velocity_soft_reset(struct velocity_info *vptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 	struct mac_regs __iomem *regs = vptr->mac_regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 	int i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 	writel(CR0_SFRST, &regs->CR0Set);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 	for (i = 0; i < W_MAX_TIMEOUT; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 		udelay(5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 		if (!DWORD_REG_BITS_IS_ON(CR0_SFRST, &regs->CR0Set))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 	if (i == W_MAX_TIMEOUT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 		writel(CR0_FORSRST, &regs->CR0Set);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 		/* FIXME: PCI POSTING */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 		/* delay 2ms */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 		mdelay(2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129)  *	velocity_set_multi	-	filter list change callback
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130)  *	@dev: network device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132)  *	Called by the network layer when the filter lists need to change
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133)  *	for a velocity adapter. Reload the CAMs with the new address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134)  *	filter ruleset.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) static void velocity_set_multi(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 	struct velocity_info *vptr = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 	struct mac_regs __iomem *regs = vptr->mac_regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 	u8 rx_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 	struct netdev_hw_addr *ha;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 	if (dev->flags & IFF_PROMISC) {	/* Set promiscuous. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 		writel(0xffffffff, &regs->MARCAM[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 		writel(0xffffffff, &regs->MARCAM[4]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 		rx_mode = (RCR_AM | RCR_AB | RCR_PROM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 	} else if ((netdev_mc_count(dev) > vptr->multicast_limit) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 		   (dev->flags & IFF_ALLMULTI)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 		writel(0xffffffff, &regs->MARCAM[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 		writel(0xffffffff, &regs->MARCAM[4]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 		rx_mode = (RCR_AM | RCR_AB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 		int offset = MCAM_SIZE - vptr->multicast_limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 		mac_get_cam_mask(regs, vptr->mCAMmask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 		i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 		netdev_for_each_mc_addr(ha, dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 			mac_set_cam(regs, i + offset, ha->addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 			vptr->mCAMmask[(offset + i) / 8] |= 1 << ((offset + i) & 7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 			i++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 		mac_set_cam_mask(regs, vptr->mCAMmask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 		rx_mode = RCR_AM | RCR_AB | RCR_AP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 	if (dev->mtu > 1500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 		rx_mode |= RCR_AL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 	BYTE_REG_BITS_ON(rx_mode, &regs->RCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175)  * MII access , media link mode setting functions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179)  *	mii_init	-	set up MII
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180)  *	@vptr: velocity adapter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181)  *	@mii_status:  links tatus
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183)  *	Set up the PHY for the current link state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) static void mii_init(struct velocity_info *vptr, u32 mii_status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 	u16 BMCR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 	switch (PHYID_GET_PHY_ID(vptr->phy_id)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 	case PHYID_ICPLUS_IP101A:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 		MII_REG_BITS_ON((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 						MII_ADVERTISE, vptr->mac_regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 		if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 			MII_REG_BITS_ON(TCSR_ECHODIS, MII_SREVISION,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 								vptr->mac_regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 			MII_REG_BITS_OFF(TCSR_ECHODIS, MII_SREVISION,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 								vptr->mac_regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 		MII_REG_BITS_ON(PLED_LALBE, MII_TPISTATUS, vptr->mac_regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 	case PHYID_CICADA_CS8201:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 		 *	Reset to hardware default
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 		MII_REG_BITS_OFF((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 		 *	Turn on ECHODIS bit in NWay-forced full mode and turn it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 		 *	off it in NWay-forced half mode for NWay-forced v.s.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 		 *	legacy-forced issue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 		if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 			MII_REG_BITS_ON(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 			MII_REG_BITS_OFF(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 		 *	Turn on Link/Activity LED enable bit for CIS8201
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 		MII_REG_BITS_ON(PLED_LALBE, MII_TPISTATUS, vptr->mac_regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 	case PHYID_VT3216_32BIT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 	case PHYID_VT3216_64BIT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 		 *	Reset to hardware default
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 		MII_REG_BITS_ON((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 		 *	Turn on ECHODIS bit in NWay-forced full mode and turn it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 		 *	off it in NWay-forced half mode for NWay-forced v.s.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 		 *	legacy-forced issue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 		if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 			MII_REG_BITS_ON(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 			MII_REG_BITS_OFF(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 	case PHYID_MARVELL_1000:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 	case PHYID_MARVELL_1000S:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 		 *	Assert CRS on Transmit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 		MII_REG_BITS_ON(PSCR_ACRSTX, MII_REG_PSCR, vptr->mac_regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 		 *	Reset to hardware default
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 		MII_REG_BITS_ON((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 		;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 	velocity_mii_read(vptr->mac_regs, MII_BMCR, &BMCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 	if (BMCR & BMCR_ISOLATE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 		BMCR &= ~BMCR_ISOLATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 		velocity_mii_write(vptr->mac_regs, MII_BMCR, BMCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259)  * setup_queue_timers	-	Setup interrupt timers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260)  * @vptr: velocity adapter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262)  * Setup interrupt frequency during suppression (timeout if the frame
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263)  * count isn't filled).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) static void setup_queue_timers(struct velocity_info *vptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 	/* Only for newer revisions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 	if (vptr->rev_id >= REV_ID_VT3216_A0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 		u8 txqueue_timer = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 		u8 rxqueue_timer = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 		if (vptr->mii_status & (VELOCITY_SPEED_1000 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 				VELOCITY_SPEED_100)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 			txqueue_timer = vptr->options.txqueue_timer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 			rxqueue_timer = vptr->options.rxqueue_timer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 		writeb(txqueue_timer, &vptr->mac_regs->TQETMR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 		writeb(rxqueue_timer, &vptr->mac_regs->RQETMR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284)  * setup_adaptive_interrupts  -  Setup interrupt suppression
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285)  * @vptr: velocity adapter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287)  * The velocity is able to suppress interrupt during high interrupt load.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288)  * This function turns on that feature.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) static void setup_adaptive_interrupts(struct velocity_info *vptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 	struct mac_regs __iomem *regs = vptr->mac_regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 	u16 tx_intsup = vptr->options.tx_intsup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 	u16 rx_intsup = vptr->options.rx_intsup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 	/* Setup default interrupt mask (will be changed below) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 	vptr->int_mask = INT_MASK_DEF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 	/* Set Tx Interrupt Suppression Threshold */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 	writeb(CAMCR_PS0, &regs->CAMCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 	if (tx_intsup != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 		vptr->int_mask &= ~(ISR_PTXI | ISR_PTX0I | ISR_PTX1I |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 				ISR_PTX2I | ISR_PTX3I);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 		writew(tx_intsup, &regs->ISRCTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 		writew(ISRCTL_TSUPDIS, &regs->ISRCTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 	/* Set Rx Interrupt Suppression Threshold */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 	writeb(CAMCR_PS1, &regs->CAMCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 	if (rx_intsup != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 		vptr->int_mask &= ~ISR_PRXI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 		writew(rx_intsup, &regs->ISRCTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 		writew(ISRCTL_RSUPDIS, &regs->ISRCTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 	/* Select page to interrupt hold timer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 	writeb(0, &regs->CAMCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321)  *	velocity_init_registers	-	initialise MAC registers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322)  *	@vptr: velocity to init
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323)  *	@type: type of initialisation (hot or cold)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325)  *	Initialise the MAC on a reset or on first set up on the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326)  *	hardware.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) static void velocity_init_registers(struct velocity_info *vptr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 				    enum velocity_init_type type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 	struct mac_regs __iomem *regs = vptr->mac_regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 	struct net_device *netdev = vptr->netdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 	int i, mii_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 	mac_wol_reset(regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 	switch (type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 	case VELOCITY_INIT_RESET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 	case VELOCITY_INIT_WOL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 		netif_stop_queue(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 		 *	Reset RX to prevent RX pointer not on the 4X location
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 		velocity_rx_reset(vptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 		mac_rx_queue_run(regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 		mac_rx_queue_wake(regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 		mii_status = velocity_get_opt_media_mode(vptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 		if (velocity_set_media_mode(vptr, mii_status) != VELOCITY_LINK_CHANGE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 			velocity_print_link_status(vptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 			if (!(vptr->mii_status & VELOCITY_LINK_FAIL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 				netif_wake_queue(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 		enable_flow_control_ability(vptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 		mac_clear_isr(regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 		writel(CR0_STOP, &regs->CR0Clr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 		writel((CR0_DPOLL | CR0_TXON | CR0_RXON | CR0_STRT),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 							&regs->CR0Set);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 	case VELOCITY_INIT_COLD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 		 *	Do reset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 		velocity_soft_reset(vptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 		mdelay(5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 		if (!vptr->no_eeprom) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 			mac_eeprom_reload(regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 			for (i = 0; i < 6; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 				writeb(netdev->dev_addr[i], regs->PAR + i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 		 *	clear Pre_ACPI bit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 		BYTE_REG_BITS_OFF(CFGA_PACPI, &(regs->CFGA));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 		mac_set_rx_thresh(regs, vptr->options.rx_thresh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 		mac_set_dma_length(regs, vptr->options.DMA_length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 		writeb(WOLCFG_SAM | WOLCFG_SAB, &regs->WOLCFGSet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 		 *	Back off algorithm use original IEEE standard
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 		BYTE_REG_BITS_SET(CFGB_OFSET, (CFGB_CRANDOM | CFGB_CAP | CFGB_MBA | CFGB_BAKOPT), &regs->CFGB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 		 *	Init CAM filter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 		velocity_init_cam_filter(vptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 		 *	Set packet filter: Receive directed and broadcast address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 		velocity_set_multi(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 		 *	Enable MII auto-polling
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) 		enable_mii_autopoll(regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 		setup_adaptive_interrupts(vptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 		writel(vptr->rx.pool_dma, &regs->RDBaseLo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) 		writew(vptr->options.numrx - 1, &regs->RDCSize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 		mac_rx_queue_run(regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 		mac_rx_queue_wake(regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 		writew(vptr->options.numtx - 1, &regs->TDCSize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 		for (i = 0; i < vptr->tx.numq; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 			writel(vptr->tx.pool_dma[i], &regs->TDBaseLo[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 			mac_tx_queue_run(regs, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 		init_flow_control_register(vptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) 		writel(CR0_STOP, &regs->CR0Clr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 		writel((CR0_DPOLL | CR0_TXON | CR0_RXON | CR0_STRT), &regs->CR0Set);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) 		mii_status = velocity_get_opt_media_mode(vptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 		netif_stop_queue(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 		mii_init(vptr, mii_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 		if (velocity_set_media_mode(vptr, mii_status) != VELOCITY_LINK_CHANGE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) 			velocity_print_link_status(vptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 			if (!(vptr->mii_status & VELOCITY_LINK_FAIL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 				netif_wake_queue(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 		enable_flow_control_ability(vptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 		mac_hw_mibs_init(regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) 		mac_write_int_mask(vptr->int_mask, regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) 		mac_clear_isr(regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) static void velocity_give_many_rx_descs(struct velocity_info *vptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 	struct mac_regs __iomem *regs = vptr->mac_regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 	int avail, dirty, unusable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 	 * RD number must be equal to 4X per hardware spec
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 	 * (programming guide rev 1.20, p.13)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 	if (vptr->rx.filled < 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 	wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) 	unusable = vptr->rx.filled & 0x0003;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 	dirty = vptr->rx.dirty - unusable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 	for (avail = vptr->rx.filled & 0xfffc; avail; avail--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 		dirty = (dirty > 0) ? dirty - 1 : vptr->options.numrx - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 		vptr->rx.ring[dirty].rdesc0.len |= OWNED_BY_NIC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 	writew(vptr->rx.filled & 0xfffc, &regs->RBRDU);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 	vptr->rx.filled = unusable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472)  *	velocity_init_dma_rings	-	set up DMA rings
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473)  *	@vptr: Velocity to set up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475)  *	Allocate PCI mapped DMA rings for the receive and transmit layer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476)  *	to use.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) static int velocity_init_dma_rings(struct velocity_info *vptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 	struct velocity_opt *opt = &vptr->options;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) 	const unsigned int rx_ring_size = opt->numrx * sizeof(struct rx_desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) 	const unsigned int tx_ring_size = opt->numtx * sizeof(struct tx_desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) 	dma_addr_t pool_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) 	void *pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) 	unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) 	 * Allocate all RD/TD rings a single pool.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) 	 * dma_alloc_coherent() fulfills the requirement for 64 bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) 	 * alignment
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 	pool = dma_alloc_coherent(vptr->dev, tx_ring_size * vptr->tx.numq +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 				    rx_ring_size, &pool_dma, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 	if (!pool) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) 		dev_err(vptr->dev, "%s : DMA memory allocation failed.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 			vptr->netdev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) 	vptr->rx.ring = pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) 	vptr->rx.pool_dma = pool_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 	pool += rx_ring_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) 	pool_dma += rx_ring_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) 	for (i = 0; i < vptr->tx.numq; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) 		vptr->tx.rings[i] = pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 		vptr->tx.pool_dma[i] = pool_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) 		pool += tx_ring_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 		pool_dma += tx_ring_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) static void velocity_set_rxbufsize(struct velocity_info *vptr, int mtu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) 	vptr->rx.buf_sz = (mtu <= ETH_DATA_LEN) ? PKT_BUF_SZ : mtu + 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523)  *	velocity_alloc_rx_buf	-	allocate aligned receive buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524)  *	@vptr: velocity
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525)  *	@idx: ring index
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527)  *	Allocate a new full sized buffer for the reception of a frame and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528)  *	map it into PCI space for the hardware to use. The hardware
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529)  *	requires *64* byte alignment of the buffer which makes life
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530)  *	less fun than would be ideal.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) 	struct rx_desc *rd = &(vptr->rx.ring[idx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) 	struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) 	rd_info->skb = netdev_alloc_skb(vptr->netdev, vptr->rx.buf_sz + 64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) 	if (rd_info->skb == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) 	 *	Do the gymnastics to get the buffer head for data at
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) 	 *	64byte alignment.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) 	skb_reserve(rd_info->skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) 			64 - ((unsigned long) rd_info->skb->data & 63));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) 	rd_info->skb_dma = dma_map_single(vptr->dev, rd_info->skb->data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) 					vptr->rx.buf_sz, DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) 	 *	Fill in the descriptor to match
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) 	*((u32 *) & (rd->rdesc0)) = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) 	rd->size = cpu_to_le16(vptr->rx.buf_sz) | RX_INTEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) 	rd->pa_low = cpu_to_le32(rd_info->skb_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) 	rd->pa_high = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) static int velocity_rx_refill(struct velocity_info *vptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) 	int dirty = vptr->rx.dirty, done = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) 		struct rx_desc *rd = vptr->rx.ring + dirty;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) 		/* Fine for an all zero Rx desc at init time as well */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) 		if (rd->rdesc0.len & OWNED_BY_NIC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) 		if (!vptr->rx.info[dirty].skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) 			if (velocity_alloc_rx_buf(vptr, dirty) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) 		done++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) 		dirty = (dirty < vptr->options.numrx - 1) ? dirty + 1 : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) 	} while (dirty != vptr->rx.curr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) 	if (done) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) 		vptr->rx.dirty = dirty;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) 		vptr->rx.filled += done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) 	return done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590)  *	velocity_free_rd_ring	-	free receive ring
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591)  *	@vptr: velocity to clean up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593)  *	Free the receive buffers for each ring slot and any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594)  *	attached socket buffers that need to go away.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) static void velocity_free_rd_ring(struct velocity_info *vptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) 	if (vptr->rx.info == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) 	for (i = 0; i < vptr->options.numrx; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) 		struct velocity_rd_info *rd_info = &(vptr->rx.info[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) 		struct rx_desc *rd = vptr->rx.ring + i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) 		memset(rd, 0, sizeof(*rd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) 		if (!rd_info->skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) 		dma_unmap_single(vptr->dev, rd_info->skb_dma, vptr->rx.buf_sz,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) 				 DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) 		rd_info->skb_dma = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) 		dev_kfree_skb(rd_info->skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) 		rd_info->skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) 	kfree(vptr->rx.info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) 	vptr->rx.info = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624)  *	velocity_init_rd_ring	-	set up receive ring
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625)  *	@vptr: velocity to configure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627)  *	Allocate and set up the receive buffers for each ring slot and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628)  *	assign them to the network adapter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) static int velocity_init_rd_ring(struct velocity_info *vptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) 	int ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) 	vptr->rx.info = kcalloc(vptr->options.numrx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) 				sizeof(struct velocity_rd_info), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) 	if (!vptr->rx.info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) 	velocity_init_rx_ring_indexes(vptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) 	if (velocity_rx_refill(vptr) != vptr->options.numrx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) 		netdev_err(vptr->netdev, "failed to allocate RX buffer\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) 		velocity_free_rd_ring(vptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) 	ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653)  *	velocity_init_td_ring	-	set up transmit ring
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654)  *	@vptr:	velocity
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656)  *	Set up the transmit ring and chain the ring pointers together.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657)  *	Returns zero on success or a negative posix errno code for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658)  *	failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) static int velocity_init_td_ring(struct velocity_info *vptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) 	int j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) 	/* Init the TD ring entries */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) 	for (j = 0; j < vptr->tx.numq; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) 		vptr->tx.infos[j] = kcalloc(vptr->options.numtx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) 					    sizeof(struct velocity_td_info),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) 					    GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) 		if (!vptr->tx.infos[j])	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) 			while (--j >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) 				kfree(vptr->tx.infos[j]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) 		vptr->tx.tail[j] = vptr->tx.curr[j] = vptr->tx.used[j] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682)  *	velocity_free_dma_rings	-	free PCI ring pointers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683)  *	@vptr: Velocity to free from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685)  *	Clean up the PCI ring buffers allocated to this velocity.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) static void velocity_free_dma_rings(struct velocity_info *vptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) 	const int size = vptr->options.numrx * sizeof(struct rx_desc) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) 		vptr->options.numtx * sizeof(struct tx_desc) * vptr->tx.numq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) 	dma_free_coherent(vptr->dev, size, vptr->rx.ring, vptr->rx.pool_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) static int velocity_init_rings(struct velocity_info *vptr, int mtu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) 	velocity_set_rxbufsize(vptr, mtu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) 	ret = velocity_init_dma_rings(vptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) 	ret = velocity_init_rd_ring(vptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) 		goto err_free_dma_rings_0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) 	ret = velocity_init_td_ring(vptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) 		goto err_free_rd_ring_1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) err_free_rd_ring_1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) 	velocity_free_rd_ring(vptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) err_free_dma_rings_0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) 	velocity_free_dma_rings(vptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) 	goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723)  *	velocity_free_tx_buf	-	free transmit buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724)  *	@vptr: velocity
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725)  *	@tdinfo: buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726)  *	@td: transmit descriptor to free
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728)  *	Release an transmit buffer. If the buffer was preallocated then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729)  *	recycle it, if not then unmap the buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) static void velocity_free_tx_buf(struct velocity_info *vptr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) 		struct velocity_td_info *tdinfo, struct tx_desc *td)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) 	struct sk_buff *skb = tdinfo->skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) 	 *	Don't unmap the pre-allocated tx_bufs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) 	for (i = 0; i < tdinfo->nskb_dma; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) 		size_t pktlen = max_t(size_t, skb->len, ETH_ZLEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) 		/* For scatter-gather */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) 		if (skb_shinfo(skb)->nr_frags > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) 			pktlen = max_t(size_t, pktlen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) 				       td->td_buf[i].size & ~TD_QUEUE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) 		dma_unmap_single(vptr->dev, tdinfo->skb_dma[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) 				 le16_to_cpu(pktlen), DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) 	dev_consume_skb_irq(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) 	tdinfo->skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756)  *	FIXME: could we merge this with velocity_free_tx_buf ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) static void velocity_free_td_ring_entry(struct velocity_info *vptr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) 							 int q, int n)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) 	struct velocity_td_info *td_info = &(vptr->tx.infos[q][n]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) 	if (td_info == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) 	if (td_info->skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) 		for (i = 0; i < td_info->nskb_dma; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) 			if (td_info->skb_dma[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) 				dma_unmap_single(vptr->dev, td_info->skb_dma[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) 					td_info->skb->len, DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) 				td_info->skb_dma[i] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) 		dev_kfree_skb(td_info->skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) 		td_info->skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781)  *	velocity_free_td_ring	-	free td ring
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782)  *	@vptr: velocity
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784)  *	Free up the transmit ring for this particular velocity adapter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785)  *	We free the ring contents but not the ring itself.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) static void velocity_free_td_ring(struct velocity_info *vptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) 	int i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) 	for (j = 0; j < vptr->tx.numq; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) 		if (vptr->tx.infos[j] == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) 		for (i = 0; i < vptr->options.numtx; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) 			velocity_free_td_ring_entry(vptr, j, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) 		kfree(vptr->tx.infos[j]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) 		vptr->tx.infos[j] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) static void velocity_free_rings(struct velocity_info *vptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) 	velocity_free_td_ring(vptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) 	velocity_free_rd_ring(vptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) 	velocity_free_dma_rings(vptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810)  *	velocity_error	-	handle error from controller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811)  *	@vptr: velocity
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812)  *	@status: card status
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814)  *	Process an error report from the hardware and attempt to recover
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815)  *	the card itself. At the moment we cannot recover from some
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816)  *	theoretically impossible errors but this could be fixed using
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817)  *	the pci_device_failed logic to bounce the hardware
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) static void velocity_error(struct velocity_info *vptr, int status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) 	if (status & ISR_TXSTLI) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) 		struct mac_regs __iomem *regs = vptr->mac_regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) 		netdev_err(vptr->netdev, "TD structure error TDindex=%hx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) 			   readw(&regs->TDIdx[0]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) 		BYTE_REG_BITS_ON(TXESR_TDSTR, &regs->TXESR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) 		writew(TRDCSR_RUN, &regs->TDCSRClr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) 		netif_stop_queue(vptr->netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) 		/* FIXME: port over the pci_device_failed code and use it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) 		   here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) 	if (status & ISR_SRCI) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) 		struct mac_regs __iomem *regs = vptr->mac_regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) 		int linked;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) 		if (vptr->options.spd_dpx == SPD_DPX_AUTO) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) 			vptr->mii_status = check_connection_type(regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) 			 *	If it is a 3119, disable frame bursting in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) 			 *	halfduplex mode and enable it in fullduplex
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) 			 *	 mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) 			if (vptr->rev_id < REV_ID_VT3216_A0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) 				if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) 					BYTE_REG_BITS_ON(TCR_TB2BDIS, &regs->TCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) 				else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) 					BYTE_REG_BITS_OFF(TCR_TB2BDIS, &regs->TCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) 			 *	Only enable CD heart beat counter in 10HD mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) 			if (!(vptr->mii_status & VELOCITY_DUPLEX_FULL) && (vptr->mii_status & VELOCITY_SPEED_10))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) 				BYTE_REG_BITS_OFF(TESTCFG_HBDIS, &regs->TESTCFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) 				BYTE_REG_BITS_ON(TESTCFG_HBDIS, &regs->TESTCFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) 			setup_queue_timers(vptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) 		 *	Get link status from PHYSR0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) 		linked = readb(&regs->PHYSR0) & PHYSR0_LINKGD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) 		if (linked) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) 			vptr->mii_status &= ~VELOCITY_LINK_FAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) 			netif_carrier_on(vptr->netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) 			vptr->mii_status |= VELOCITY_LINK_FAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) 			netif_carrier_off(vptr->netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) 		velocity_print_link_status(vptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) 		enable_flow_control_ability(vptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) 		 *	Re-enable auto-polling because SRCI will disable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) 		 *	auto-polling
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) 		enable_mii_autopoll(regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) 		if (vptr->mii_status & VELOCITY_LINK_FAIL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) 			netif_stop_queue(vptr->netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) 			netif_wake_queue(vptr->netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) 	if (status & ISR_MIBFI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) 		velocity_update_hw_mibs(vptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) 	if (status & ISR_LSTEI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) 		mac_rx_queue_wake(vptr->mac_regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900)  *	tx_srv		-	transmit interrupt service
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901)  *	@vptr: Velocity
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903)  *	Scan the queues looking for transmitted packets that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904)  *	we can complete and clean up. Update any statistics as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905)  *	necessary/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) static int velocity_tx_srv(struct velocity_info *vptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) 	struct tx_desc *td;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) 	int qnum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) 	int full = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) 	int idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) 	int works = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) 	struct velocity_td_info *tdinfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) 	struct net_device_stats *stats = &vptr->netdev->stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) 	for (qnum = 0; qnum < vptr->tx.numq; qnum++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) 		for (idx = vptr->tx.tail[qnum]; vptr->tx.used[qnum] > 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) 			idx = (idx + 1) % vptr->options.numtx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) 			 *	Get Tx Descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) 			td = &(vptr->tx.rings[qnum][idx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) 			tdinfo = &(vptr->tx.infos[qnum][idx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) 			if (td->tdesc0.len & OWNED_BY_NIC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) 			if ((works++ > 15))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) 			if (td->tdesc0.TSR & TSR0_TERR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) 				stats->tx_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) 				stats->tx_dropped++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) 				if (td->tdesc0.TSR & TSR0_CDH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) 					stats->tx_heartbeat_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) 				if (td->tdesc0.TSR & TSR0_CRS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) 					stats->tx_carrier_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) 				if (td->tdesc0.TSR & TSR0_ABT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) 					stats->tx_aborted_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) 				if (td->tdesc0.TSR & TSR0_OWC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) 					stats->tx_window_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) 				stats->tx_packets++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) 				stats->tx_bytes += tdinfo->skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) 			velocity_free_tx_buf(vptr, tdinfo, td);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) 			vptr->tx.used[qnum]--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) 		vptr->tx.tail[qnum] = idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) 		if (AVAIL_TD(vptr, qnum) < 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) 			full = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) 	 *	Look to see if we should kick the transmit network
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) 	 *	layer for more work.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) 	if (netif_queue_stopped(vptr->netdev) && (full == 0) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) 	    (!(vptr->mii_status & VELOCITY_LINK_FAIL))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) 		netif_wake_queue(vptr->netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) 	return works;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968)  *	velocity_rx_csum	-	checksum process
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969)  *	@rd: receive packet descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970)  *	@skb: network layer packet buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972)  *	Process the status bits for the received packet and determine
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973)  *	if the checksum was computed and verified by the hardware
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) static inline void velocity_rx_csum(struct rx_desc *rd, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) 	skb_checksum_none_assert(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) 	if (rd->rdesc1.CSM & CSM_IPKT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) 		if (rd->rdesc1.CSM & CSM_IPOK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) 			if ((rd->rdesc1.CSM & CSM_TCPKT) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) 					(rd->rdesc1.CSM & CSM_UDPKT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) 				if (!(rd->rdesc1.CSM & CSM_TUPOK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) 					return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) 			skb->ip_summed = CHECKSUM_UNNECESSARY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992)  *	velocity_rx_copy	-	in place Rx copy for small packets
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993)  *	@rx_skb: network layer packet buffer candidate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994)  *	@pkt_size: received data size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995)  *	@vptr: velocity adapter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997)  *	Replace the current skb that is scheduled for Rx processing by a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998)  *	shorter, immediately allocated skb, if the received packet is small
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999)  *	enough. This function returns a negative value if the received
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000)  *	packet is too big or if memory is exhausted.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) static int velocity_rx_copy(struct sk_buff **rx_skb, int pkt_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) 			    struct velocity_info *vptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) 	int ret = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) 	if (pkt_size < rx_copybreak) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) 		struct sk_buff *new_skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) 		new_skb = netdev_alloc_skb_ip_align(vptr->netdev, pkt_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) 		if (new_skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) 			new_skb->ip_summed = rx_skb[0]->ip_summed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) 			skb_copy_from_linear_data(*rx_skb, new_skb->data, pkt_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) 			*rx_skb = new_skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) 			ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022)  *	velocity_iph_realign	-	IP header alignment
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023)  *	@vptr: velocity we are handling
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024)  *	@skb: network layer packet buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025)  *	@pkt_size: received data size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027)  *	Align IP header on a 2 bytes boundary. This behavior can be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028)  *	configured by the user.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) static inline void velocity_iph_realign(struct velocity_info *vptr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) 					struct sk_buff *skb, int pkt_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) 	if (vptr->flags & VELOCITY_FLAGS_IP_ALIGN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) 		memmove(skb->data + 2, skb->data, pkt_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) 		skb_reserve(skb, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040)  *	velocity_receive_frame	-	received packet processor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041)  *	@vptr: velocity we are handling
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042)  *	@idx: ring index
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044)  *	A packet has arrived. We process the packet and if appropriate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045)  *	pass the frame up the network stack
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) static int velocity_receive_frame(struct velocity_info *vptr, int idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) 	struct net_device_stats *stats = &vptr->netdev->stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) 	struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) 	struct rx_desc *rd = &(vptr->rx.ring[idx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) 	int pkt_len = le16_to_cpu(rd->rdesc0.len) & 0x3fff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) 	struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) 	if (unlikely(rd->rdesc0.RSR & (RSR_STP | RSR_EDP | RSR_RL))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) 		if (rd->rdesc0.RSR & (RSR_STP | RSR_EDP))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) 			netdev_err(vptr->netdev, "received frame spans multiple RDs\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) 		stats->rx_length_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) 	if (rd->rdesc0.RSR & RSR_MAR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) 		stats->multicast++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) 	skb = rd_info->skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) 	dma_sync_single_for_cpu(vptr->dev, rd_info->skb_dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) 				    vptr->rx.buf_sz, DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) 	velocity_rx_csum(rd, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) 	if (velocity_rx_copy(&skb, pkt_len, vptr) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) 		velocity_iph_realign(vptr, skb, pkt_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) 		rd_info->skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) 		dma_unmap_single(vptr->dev, rd_info->skb_dma, vptr->rx.buf_sz,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) 				 DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) 		dma_sync_single_for_device(vptr->dev, rd_info->skb_dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) 					   vptr->rx.buf_sz, DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) 	skb_put(skb, pkt_len - 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) 	skb->protocol = eth_type_trans(skb, vptr->netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) 	if (rd->rdesc0.RSR & RSR_DETAG) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) 		u16 vid = swab16(le16_to_cpu(rd->rdesc1.PQTAG));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) 	netif_receive_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) 	stats->rx_bytes += pkt_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) 	stats->rx_packets++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099)  *	velocity_rx_srv		-	service RX interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100)  *	@vptr: velocity
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101)  *	@budget_left: remaining budget
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103)  *	Walk the receive ring of the velocity adapter and remove
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104)  *	any received packets from the receive queue. Hand the ring
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105)  *	slots back to the adapter for reuse.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) static int velocity_rx_srv(struct velocity_info *vptr, int budget_left)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) 	struct net_device_stats *stats = &vptr->netdev->stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) 	int rd_curr = vptr->rx.curr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) 	int works = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) 	while (works < budget_left) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) 		struct rx_desc *rd = vptr->rx.ring + rd_curr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) 		if (!vptr->rx.info[rd_curr].skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) 		if (rd->rdesc0.len & OWNED_BY_NIC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) 		rmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) 		 *	Don't drop CE or RL error frame although RXOK is off
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) 		if (rd->rdesc0.RSR & (RSR_RXOK | RSR_CE | RSR_RL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) 			if (velocity_receive_frame(vptr, rd_curr) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) 				stats->rx_dropped++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) 			if (rd->rdesc0.RSR & RSR_CRC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) 				stats->rx_crc_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) 			if (rd->rdesc0.RSR & RSR_FAE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) 				stats->rx_frame_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) 			stats->rx_dropped++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) 		rd->size |= RX_INTEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) 		rd_curr++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) 		if (rd_curr >= vptr->options.numrx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) 			rd_curr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) 		works++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) 	vptr->rx.curr = rd_curr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) 	if ((works > 0) && (velocity_rx_refill(vptr) > 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) 		velocity_give_many_rx_descs(vptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) 	VAR_USED(stats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) 	return works;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) static int velocity_poll(struct napi_struct *napi, int budget)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) 	struct velocity_info *vptr = container_of(napi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) 			struct velocity_info, napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) 	unsigned int rx_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) 	 * Do rx and tx twice for performance (taken from the VIA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) 	 * out-of-tree driver).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) 	rx_done = velocity_rx_srv(vptr, budget);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) 	spin_lock_irqsave(&vptr->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) 	velocity_tx_srv(vptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) 	/* If budget not fully consumed, exit the polling mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) 	if (rx_done < budget) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) 		napi_complete_done(napi, rx_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) 		mac_enable_int(vptr->mac_regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) 	spin_unlock_irqrestore(&vptr->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) 	return rx_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181)  *	velocity_intr		-	interrupt callback
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182)  *	@irq: interrupt number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183)  *	@dev_instance: interrupting device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185)  *	Called whenever an interrupt is generated by the velocity
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186)  *	adapter IRQ line. We may not be the source of the interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187)  *	and need to identify initially if we are, and if not exit as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188)  *	efficiently as possible.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) static irqreturn_t velocity_intr(int irq, void *dev_instance)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) 	struct net_device *dev = dev_instance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) 	struct velocity_info *vptr = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) 	u32 isr_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) 	spin_lock(&vptr->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) 	isr_status = mac_read_isr(vptr->mac_regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) 	/* Not us ? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) 	if (isr_status == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) 		spin_unlock(&vptr->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) 		return IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) 	/* Ack the interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) 	mac_write_isr(vptr->mac_regs, isr_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) 	if (likely(napi_schedule_prep(&vptr->napi))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) 		mac_disable_int(vptr->mac_regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) 		__napi_schedule(&vptr->napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) 	if (isr_status & (~(ISR_PRXI | ISR_PPRXI | ISR_PTXI | ISR_PPTXI)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) 		velocity_error(vptr, isr_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) 	spin_unlock(&vptr->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) 	return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222)  *	velocity_open		-	interface activation callback
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223)  *	@dev: network layer device to open
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225)  *	Called when the network layer brings the interface up. Returns
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226)  *	a negative posix error code on failure, or zero on success.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228)  *	All the ring allocation and set up is done on open for this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229)  *	adapter to minimise memory usage when inactive
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) static int velocity_open(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) 	struct velocity_info *vptr = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) 	ret = velocity_init_rings(vptr, dev->mtu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) 	/* Ensure chip is running */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) 	velocity_set_power_state(vptr, PCI_D0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) 	velocity_init_registers(vptr, VELOCITY_INIT_COLD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) 	ret = request_irq(dev->irq, velocity_intr, IRQF_SHARED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) 			  dev->name, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) 	if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) 		/* Power down the chip */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) 		velocity_set_power_state(vptr, PCI_D3hot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) 		velocity_free_rings(vptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) 	velocity_give_many_rx_descs(vptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) 	mac_enable_int(vptr->mac_regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) 	netif_start_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) 	napi_enable(&vptr->napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) 	vptr->flags |= VELOCITY_FLAGS_OPENED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265)  *	velocity_shutdown	-	shut down the chip
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266)  *	@vptr: velocity to deactivate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268)  *	Shuts down the internal operations of the velocity and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269)  *	disables interrupts, autopolling, transmit and receive
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) static void velocity_shutdown(struct velocity_info *vptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) 	struct mac_regs __iomem *regs = vptr->mac_regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) 	mac_disable_int(regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) 	writel(CR0_STOP, &regs->CR0Set);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) 	writew(0xFFFF, &regs->TDCSRClr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) 	writeb(0xFF, &regs->RDCSRClr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) 	safe_disable_mii_autopoll(regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) 	mac_clear_isr(regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283)  *	velocity_change_mtu	-	MTU change callback
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284)  *	@dev: network device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285)  *	@new_mtu: desired MTU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287)  *	Handle requests from the networking layer for MTU change on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288)  *	this interface. It gets called on a change by the network layer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289)  *	Return zero for success or negative posix error code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) static int velocity_change_mtu(struct net_device *dev, int new_mtu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) 	struct velocity_info *vptr = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) 	if (!netif_running(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) 		dev->mtu = new_mtu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) 		goto out_0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) 	if (dev->mtu != new_mtu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) 		struct velocity_info *tmp_vptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) 		unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) 		struct rx_info rx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) 		struct tx_info tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) 		tmp_vptr = kzalloc(sizeof(*tmp_vptr), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) 		if (!tmp_vptr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) 			ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) 			goto out_0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) 		tmp_vptr->netdev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) 		tmp_vptr->pdev = vptr->pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) 		tmp_vptr->dev = vptr->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) 		tmp_vptr->options = vptr->options;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) 		tmp_vptr->tx.numq = vptr->tx.numq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) 		ret = velocity_init_rings(tmp_vptr, new_mtu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) 		if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) 			goto out_free_tmp_vptr_1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) 		napi_disable(&vptr->napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) 		spin_lock_irqsave(&vptr->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) 		netif_stop_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) 		velocity_shutdown(vptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) 		rx = vptr->rx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) 		tx = vptr->tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) 		vptr->rx = tmp_vptr->rx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) 		vptr->tx = tmp_vptr->tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) 		tmp_vptr->rx = rx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) 		tmp_vptr->tx = tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) 		dev->mtu = new_mtu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) 		velocity_init_registers(vptr, VELOCITY_INIT_COLD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) 		velocity_give_many_rx_descs(vptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) 		napi_enable(&vptr->napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) 		mac_enable_int(vptr->mac_regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) 		netif_start_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) 		spin_unlock_irqrestore(&vptr->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) 		velocity_free_rings(tmp_vptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) out_free_tmp_vptr_1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) 		kfree(tmp_vptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) out_0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) #ifdef CONFIG_NET_POLL_CONTROLLER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363)  *  velocity_poll_controller		-	Velocity Poll controller function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364)  *  @dev: network device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367)  *  Used by NETCONSOLE and other diagnostic tools to allow network I/P
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368)  *  with interrupts disabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) static void velocity_poll_controller(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) 	disable_irq(dev->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) 	velocity_intr(dev->irq, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) 	enable_irq(dev->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379)  *	velocity_mii_ioctl		-	MII ioctl handler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380)  *	@dev: network device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381)  *	@ifr: the ifreq block for the ioctl
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382)  *	@cmd: the command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384)  *	Process MII requests made via ioctl from the network layer. These
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385)  *	are used by tools like kudzu to interrogate the link state of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386)  *	hardware
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) static int velocity_mii_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) 	struct velocity_info *vptr = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) 	struct mac_regs __iomem *regs = vptr->mac_regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) 	struct mii_ioctl_data *miidata = if_mii(ifr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) 	switch (cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) 	case SIOCGMIIPHY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) 		miidata->phy_id = readb(&regs->MIIADR) & 0x1f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) 	case SIOCGMIIREG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) 		if (velocity_mii_read(vptr->mac_regs, miidata->reg_num & 0x1f, &(miidata->val_out)) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) 			return -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) 	case SIOCSMIIREG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) 		spin_lock_irqsave(&vptr->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) 		err = velocity_mii_write(vptr->mac_regs, miidata->reg_num & 0x1f, miidata->val_in);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) 		spin_unlock_irqrestore(&vptr->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) 		check_connection_type(vptr->mac_regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) 		return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419)  *	velocity_ioctl		-	ioctl entry point
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420)  *	@dev: network device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421)  *	@rq: interface request ioctl
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422)  *	@cmd: command code
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424)  *	Called when the user issues an ioctl request to the network
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425)  *	device in question. The velocity interface supports MII.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) static int velocity_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) 	struct velocity_info *vptr = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) 	/* If we are asked for information and the device is power
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) 	   saving then we need to bring the device back up to talk to it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) 	if (!netif_running(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) 		velocity_set_power_state(vptr, PCI_D0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) 	switch (cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) 	case SIOCGMIIPHY:	/* Get address of MII PHY in use. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) 	case SIOCGMIIREG:	/* Read MII PHY register. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) 	case SIOCSMIIREG:	/* Write to MII PHY register. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) 		ret = velocity_mii_ioctl(dev, rq, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) 		ret = -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) 	if (!netif_running(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) 		velocity_set_power_state(vptr, PCI_D3hot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456)  *	velocity_get_status	-	statistics callback
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457)  *	@dev: network device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459)  *	Callback from the network layer to allow driver statistics
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460)  *	to be resynchronized with hardware collected state. In the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461)  *	case of the velocity we need to pull the MIB counters from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462)  *	the hardware into the counters before letting the network
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463)  *	layer display them.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) static struct net_device_stats *velocity_get_stats(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) 	struct velocity_info *vptr = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) 	/* If the hardware is down, don't touch MII */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) 	if (!netif_running(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) 		return &dev->stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) 	spin_lock_irq(&vptr->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) 	velocity_update_hw_mibs(vptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) 	spin_unlock_irq(&vptr->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) 	dev->stats.rx_packets = vptr->mib_counter[HW_MIB_ifRxAllPkts];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) 	dev->stats.rx_errors = vptr->mib_counter[HW_MIB_ifRxErrorPkts];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) 	dev->stats.rx_length_errors = vptr->mib_counter[HW_MIB_ifInRangeLengthErrors];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) //  unsigned long   rx_dropped;     /* no space in linux buffers    */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) 	dev->stats.collisions = vptr->mib_counter[HW_MIB_ifTxEtherCollisions];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) 	/* detailed rx_errors: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) //  unsigned long   rx_length_errors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) //  unsigned long   rx_over_errors;     /* receiver ring buff overflow  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) 	dev->stats.rx_crc_errors = vptr->mib_counter[HW_MIB_ifRxPktCRCE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) //  unsigned long   rx_frame_errors;    /* recv'd frame alignment error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) //  unsigned long   rx_fifo_errors;     /* recv'r fifo overrun      */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) //  unsigned long   rx_missed_errors;   /* receiver missed packet   */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) 	/* detailed tx_errors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) //  unsigned long   tx_fifo_errors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) 	return &dev->stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498)  *	velocity_close		-	close adapter callback
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499)  *	@dev: network device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501)  *	Callback from the network layer when the velocity is being
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502)  *	deactivated by the network layer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) static int velocity_close(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) 	struct velocity_info *vptr = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) 	napi_disable(&vptr->napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) 	netif_stop_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) 	velocity_shutdown(vptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) 	if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) 		velocity_get_ip(vptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) 	free_irq(dev->irq, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) 	velocity_free_rings(vptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) 	vptr->flags &= (~VELOCITY_FLAGS_OPENED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524)  *	velocity_xmit		-	transmit packet callback
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525)  *	@skb: buffer to transmit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526)  *	@dev: network device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528)  *	Called by the networ layer to request a packet is queued to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529)  *	the velocity. Returns zero on success.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) static netdev_tx_t velocity_xmit(struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) 				 struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) 	struct velocity_info *vptr = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) 	int qnum = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) 	struct tx_desc *td_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) 	struct velocity_td_info *tdinfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) 	int pktlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) 	int index, prev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) 	int i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) 	if (skb_padto(skb, ETH_ZLEN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) 	/* The hardware can handle at most 7 memory segments, so merge
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) 	 * the skb if there are more */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) 	if (skb_shinfo(skb)->nr_frags > 6 && __skb_linearize(skb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) 		dev_kfree_skb_any(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) 		return NETDEV_TX_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) 	pktlen = skb_shinfo(skb)->nr_frags == 0 ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) 			max_t(unsigned int, skb->len, ETH_ZLEN) :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) 				skb_headlen(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) 	spin_lock_irqsave(&vptr->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) 	index = vptr->tx.curr[qnum];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) 	td_ptr = &(vptr->tx.rings[qnum][index]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) 	tdinfo = &(vptr->tx.infos[qnum][index]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) 	td_ptr->tdesc1.TCR = TCR0_TIC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) 	td_ptr->td_buf[0].size &= ~TD_QUEUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) 	 *	Map the linear network buffer into PCI space and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) 	 *	add it to the transmit ring.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) 	tdinfo->skb = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) 	tdinfo->skb_dma[0] = dma_map_single(vptr->dev, skb->data, pktlen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) 								DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) 	td_ptr->tdesc0.len = cpu_to_le16(pktlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) 	td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) 	td_ptr->td_buf[0].pa_high = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) 	td_ptr->td_buf[0].size = cpu_to_le16(pktlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) 	/* Handle fragments */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) 		tdinfo->skb_dma[i + 1] = skb_frag_dma_map(vptr->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) 							  frag, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) 							  skb_frag_size(frag),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) 							  DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) 		td_ptr->td_buf[i + 1].pa_low = cpu_to_le32(tdinfo->skb_dma[i + 1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) 		td_ptr->td_buf[i + 1].pa_high = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) 		td_ptr->td_buf[i + 1].size = cpu_to_le16(skb_frag_size(frag));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) 	tdinfo->nskb_dma = i + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) 	td_ptr->tdesc1.cmd = TCPLS_NORMAL + (tdinfo->nskb_dma + 1) * 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) 	if (skb_vlan_tag_present(skb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) 		td_ptr->tdesc1.vlan = cpu_to_le16(skb_vlan_tag_get(skb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) 		td_ptr->tdesc1.TCR |= TCR0_VETAG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) 	 *	Handle hardware checksum
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) 		const struct iphdr *ip = ip_hdr(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) 		if (ip->protocol == IPPROTO_TCP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) 			td_ptr->tdesc1.TCR |= TCR0_TCPCK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) 		else if (ip->protocol == IPPROTO_UDP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) 			td_ptr->tdesc1.TCR |= (TCR0_UDPCK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) 		td_ptr->tdesc1.TCR |= TCR0_IPCK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) 	prev = index - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) 	if (prev < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) 		prev = vptr->options.numtx - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) 	td_ptr->tdesc0.len |= OWNED_BY_NIC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) 	vptr->tx.used[qnum]++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) 	vptr->tx.curr[qnum] = (index + 1) % vptr->options.numtx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) 	if (AVAIL_TD(vptr, qnum) < 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) 		netif_stop_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) 	td_ptr = &(vptr->tx.rings[qnum][prev]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) 	td_ptr->td_buf[0].size |= TD_QUEUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) 	mac_tx_queue_wake(vptr->mac_regs, qnum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) 	spin_unlock_irqrestore(&vptr->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) 	return NETDEV_TX_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) static const struct net_device_ops velocity_netdev_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) 	.ndo_open		= velocity_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) 	.ndo_stop		= velocity_close,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) 	.ndo_start_xmit		= velocity_xmit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) 	.ndo_get_stats		= velocity_get_stats,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) 	.ndo_validate_addr	= eth_validate_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) 	.ndo_set_mac_address	= eth_mac_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) 	.ndo_set_rx_mode	= velocity_set_multi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) 	.ndo_change_mtu		= velocity_change_mtu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) 	.ndo_do_ioctl		= velocity_ioctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) 	.ndo_vlan_rx_add_vid	= velocity_vlan_rx_add_vid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) 	.ndo_vlan_rx_kill_vid	= velocity_vlan_rx_kill_vid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) #ifdef CONFIG_NET_POLL_CONTROLLER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) 	.ndo_poll_controller = velocity_poll_controller,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649)  *	velocity_init_info	-	init private data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650)  *	@vptr: Velocity info
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651)  *	@info: Board type
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653)  *	Set up the initial velocity_info struct for the device that has been
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654)  *	discovered.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) static void velocity_init_info(struct velocity_info *vptr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) 				const struct velocity_info_tbl *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) 	vptr->chip_id = info->chip_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) 	vptr->tx.numq = info->txqueue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) 	vptr->multicast_limit = MCAM_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) 	spin_lock_init(&vptr->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666)  *	velocity_get_pci_info	-	retrieve PCI info for device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667)  *	@vptr: velocity device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669)  *	Retrieve the PCI configuration space data that interests us from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670)  *	the kernel PCI layer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) static int velocity_get_pci_info(struct velocity_info *vptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) 	struct pci_dev *pdev = vptr->pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) 	pci_set_master(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) 	vptr->ioaddr = pci_resource_start(pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) 	vptr->memaddr = pci_resource_start(pdev, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) 	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_IO)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) 		dev_err(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) 			   "region #0 is not an I/O resource, aborting.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) 	if ((pci_resource_flags(pdev, 1) & IORESOURCE_IO)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) 		dev_err(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) 			   "region #1 is an I/O resource, aborting.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) 	if (pci_resource_len(pdev, 1) < VELOCITY_IO_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) 		dev_err(&pdev->dev, "region #1 is too small.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702)  *	velocity_get_platform_info - retrieve platform info for device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703)  *	@vptr: velocity device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705)  *	Retrieve the Platform configuration data that interests us
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) static int velocity_get_platform_info(struct velocity_info *vptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) 	struct resource res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) 	if (of_get_property(vptr->dev->of_node, "no-eeprom", NULL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) 		vptr->no_eeprom = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) 	ret = of_address_to_resource(vptr->dev->of_node, 0, &res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) 		dev_err(vptr->dev, "unable to find memory address\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) 	vptr->memaddr = res.start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) 	if (resource_size(&res) < VELOCITY_IO_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) 		dev_err(vptr->dev, "memory region is too small.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732)  *	velocity_print_info	-	per driver data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733)  *	@vptr: velocity
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735)  *	Print per driver data as the kernel driver finds Velocity
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736)  *	hardware
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) static void velocity_print_info(struct velocity_info *vptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) 	netdev_info(vptr->netdev, "%s - Ethernet Address: %pM\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) 		    get_chip_name(vptr->chip_id), vptr->netdev->dev_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) static u32 velocity_get_link(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) 	struct velocity_info *vptr = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) 	struct mac_regs __iomem *regs = vptr->mac_regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) 	return BYTE_REG_BITS_IS_ON(PHYSR0_LINKGD, &regs->PHYSR0) ? 1 : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752)  *	velocity_probe - set up discovered velocity device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753)  *	@dev: PCI device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754)  *	@info: table of match
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755)  *	@irq: interrupt info
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756)  *	@bustype: bus that device is connected to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758)  *	Configure a discovered adapter from scratch. Return a negative
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759)  *	errno error code on failure paths.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) static int velocity_probe(struct device *dev, int irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) 			   const struct velocity_info_tbl *info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) 			   enum velocity_bus_type bustype)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) 	struct net_device *netdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) 	struct velocity_info *vptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) 	struct mac_regs __iomem *regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) 	int ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) 	/* FIXME: this driver, like almost all other ethernet drivers,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) 	 * can support more than MAX_UNITS.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) 	if (velocity_nics >= MAX_UNITS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) 		dev_notice(dev, "already found %d NICs.\n", velocity_nics);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) 	netdev = alloc_etherdev(sizeof(struct velocity_info));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) 	if (!netdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) 	/* Chain it all together */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) 	SET_NETDEV_DEV(netdev, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) 	vptr = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) 	pr_info_once("%s Ver. %s\n", VELOCITY_FULL_DRV_NAM, VELOCITY_VERSION);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) 	pr_info_once("Copyright (c) 2002, 2003 VIA Networking Technologies, Inc.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) 	pr_info_once("Copyright (c) 2004 Red Hat Inc.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) 	netdev->irq = irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) 	vptr->netdev = netdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) 	vptr->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) 	velocity_init_info(vptr, info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) 	if (bustype == BUS_PCI) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) 		vptr->pdev = to_pci_dev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) 		ret = velocity_get_pci_info(vptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) 		if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) 			goto err_free_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) 		vptr->pdev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) 		ret = velocity_get_platform_info(vptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) 		if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) 			goto err_free_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) 	regs = ioremap(vptr->memaddr, VELOCITY_IO_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) 	if (regs == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) 		ret = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) 		goto err_free_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) 	vptr->mac_regs = regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) 	vptr->rev_id = readb(&regs->rev_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) 	mac_wol_reset(regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) 	for (i = 0; i < 6; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) 		netdev->dev_addr[i] = readb(&regs->PAR[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) 	velocity_get_options(&vptr->options, velocity_nics);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) 	 *	Mask out the options cannot be set to the chip
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) 	vptr->options.flags &= info->flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) 	 *	Enable the chip specified capbilities
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) 	vptr->flags = vptr->options.flags | (info->flags & 0xFF000000UL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) 	vptr->wol_opts = vptr->options.wol_opts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) 	vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) 	vptr->phy_id = MII_GET_PHY_ID(vptr->mac_regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) 	netdev->netdev_ops = &velocity_netdev_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) 	netdev->ethtool_ops = &velocity_ethtool_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) 	netif_napi_add(netdev, &vptr->napi, velocity_poll,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) 							VELOCITY_NAPI_WEIGHT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) 	netdev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) 			   NETIF_F_HW_VLAN_CTAG_TX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) 	netdev->features |= NETIF_F_HW_VLAN_CTAG_TX |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) 			NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) 			NETIF_F_IP_CSUM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) 	/* MTU range: 64 - 9000 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) 	netdev->min_mtu = VELOCITY_MIN_MTU;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) 	netdev->max_mtu = VELOCITY_MAX_MTU;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) 	ret = register_netdev(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) 		goto err_iounmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) 	if (!velocity_get_link(netdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) 		netif_carrier_off(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) 		vptr->mii_status |= VELOCITY_LINK_FAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) 	velocity_print_info(vptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) 	dev_set_drvdata(vptr->dev, netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) 	/* and leave the chip powered down */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) 	velocity_set_power_state(vptr, PCI_D3hot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) 	velocity_nics++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) err_iounmap:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) 	netif_napi_del(&vptr->napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) 	iounmap(regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) err_free_dev:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) 	free_netdev(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) 	goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888)  *	velocity_remove	- device unplug
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889)  *	@dev: device being removed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891)  *	Device unload callback. Called on an unplug or on module
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892)  *	unload for each active device that is present. Disconnects
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893)  *	the device from the network layer and frees all the resources
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) static int velocity_remove(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) 	struct net_device *netdev = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) 	struct velocity_info *vptr = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) 	unregister_netdev(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) 	netif_napi_del(&vptr->napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) 	iounmap(vptr->mac_regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) 	free_netdev(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) 	velocity_nics--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) static int velocity_pci_probe(struct pci_dev *pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) 			       const struct pci_device_id *ent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912) 	const struct velocity_info_tbl *info =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) 					&chip_info_table[ent->driver_data];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) 	ret = pci_enable_device(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) 	ret = pci_request_regions(pdev, VELOCITY_NAME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) 	if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922) 		dev_err(&pdev->dev, "No PCI resources.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) 		goto fail1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) 	ret = velocity_probe(&pdev->dev, pdev->irq, info, BUS_PCI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927) 	if (ret == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) 	pci_release_regions(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) fail1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) 	pci_disable_device(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) static void velocity_pci_remove(struct pci_dev *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) 	velocity_remove(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) 	pci_release_regions(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) 	pci_disable_device(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) static int velocity_platform_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946) 	const struct of_device_id *of_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947) 	const struct velocity_info_tbl *info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) 	int irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950) 	of_id = of_match_device(velocity_of_ids, &pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) 	if (!of_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953) 	info = of_id->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) 	irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) 	if (!irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) 	return velocity_probe(&pdev->dev, irq, info, BUS_PLATFORM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962) static int velocity_platform_remove(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964) 	velocity_remove(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) #ifdef CONFIG_PM_SLEEP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971)  *	wol_calc_crc		-	WOL CRC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972)  *	@size: size of the wake mask
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973)  *	@pattern: data pattern
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974)  *	@mask_pattern: mask
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976)  *	Compute the wake on lan crc hashes for the packet header
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977)  *	we are interested in.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979) static u16 wol_calc_crc(int size, u8 *pattern, u8 *mask_pattern)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981) 	u16 crc = 0xFFFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982) 	u8 mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) 	int i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) 	for (i = 0; i < size; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) 		mask = mask_pattern[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988) 		/* Skip this loop if the mask equals to zero */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989) 		if (mask == 0x00)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992) 		for (j = 0; j < 8; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993) 			if ((mask & 0x01) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994) 				mask >>= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997) 			mask >>= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998) 			crc = crc_ccitt(crc, &(pattern[i * 8 + j]), 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001) 	/*	Finally, invert the result once to get the correct data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002) 	crc = ~crc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003) 	return bitrev32(crc) >> 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007)  *	velocity_set_wol	-	set up for wake on lan
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008)  *	@vptr: velocity to set WOL status on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010)  *	Set a card up for wake on lan either by unicast or by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011)  *	ARP packet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013)  *	FIXME: check static buffer is safe here
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015) static int velocity_set_wol(struct velocity_info *vptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017) 	struct mac_regs __iomem *regs = vptr->mac_regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) 	enum speed_opt spd_dpx = vptr->options.spd_dpx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019) 	static u8 buf[256];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022) 	static u32 mask_pattern[2][4] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023) 		{0x00203000, 0x000003C0, 0x00000000, 0x0000000}, /* ARP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024) 		{0xfffff000, 0xffffffff, 0xffffffff, 0x000ffff}	 /* Magic Packet */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027) 	writew(0xFFFF, &regs->WOLCRClr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028) 	writeb(WOLCFG_SAB | WOLCFG_SAM, &regs->WOLCFGSet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029) 	writew(WOLCR_MAGIC_EN, &regs->WOLCRSet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032) 	   if (vptr->wol_opts & VELOCITY_WOL_PHY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033) 	   writew((WOLCR_LINKON_EN|WOLCR_LINKOFF_EN), &regs->WOLCRSet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036) 	if (vptr->wol_opts & VELOCITY_WOL_UCAST)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037) 		writew(WOLCR_UNICAST_EN, &regs->WOLCRSet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039) 	if (vptr->wol_opts & VELOCITY_WOL_ARP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040) 		struct arp_packet *arp = (struct arp_packet *) buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041) 		u16 crc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042) 		memset(buf, 0, sizeof(struct arp_packet) + 7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044) 		for (i = 0; i < 4; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045) 			writel(mask_pattern[0][i], &regs->ByteMask[0][i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047) 		arp->type = htons(ETH_P_ARP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048) 		arp->ar_op = htons(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050) 		memcpy(arp->ar_tip, vptr->ip_addr, 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052) 		crc = wol_calc_crc((sizeof(struct arp_packet) + 7) / 8, buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053) 				(u8 *) & mask_pattern[0][0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055) 		writew(crc, &regs->PatternCRC[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056) 		writew(WOLCR_ARP_EN, &regs->WOLCRSet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059) 	BYTE_REG_BITS_ON(PWCFG_WOLTYPE, &regs->PWCFGSet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060) 	BYTE_REG_BITS_ON(PWCFG_LEGACY_WOLEN, &regs->PWCFGSet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062) 	writew(0x0FFF, &regs->WOLSRClr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064) 	if (spd_dpx == SPD_DPX_1000_FULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065) 		goto mac_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067) 	if (spd_dpx != SPD_DPX_AUTO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068) 		goto advertise_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070) 	if (vptr->mii_status & VELOCITY_AUTONEG_ENABLE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071) 		if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072) 			MII_REG_BITS_ON(AUXCR_MDPPS, MII_NCONFIG, vptr->mac_regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074) 		MII_REG_BITS_OFF(ADVERTISE_1000FULL | ADVERTISE_1000HALF, MII_CTRL1000, vptr->mac_regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077) 	if (vptr->mii_status & VELOCITY_SPEED_1000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078) 		MII_REG_BITS_ON(BMCR_ANRESTART, MII_BMCR, vptr->mac_regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080) advertise_done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081) 	BYTE_REG_BITS_ON(CHIPGCR_FCMODE, &regs->CHIPGCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084) 		u8 GCR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085) 		GCR = readb(&regs->CHIPGCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) 		GCR = (GCR & ~CHIPGCR_FCGMII) | CHIPGCR_FCFDX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087) 		writeb(GCR, &regs->CHIPGCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090) mac_done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091) 	BYTE_REG_BITS_OFF(ISR_PWEI, &regs->ISR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092) 	/* Turn on SWPTAG just before entering power mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093) 	BYTE_REG_BITS_ON(STICKHW_SWPTAG, &regs->STICKHW);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094) 	/* Go to bed ..... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095) 	BYTE_REG_BITS_ON((STICKHW_DS1 | STICKHW_DS0), &regs->STICKHW);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101)  *	velocity_save_context	-	save registers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102)  *	@vptr: velocity
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103)  *	@context: buffer for stored context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105)  *	Retrieve the current configuration from the velocity hardware
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106)  *	and stash it in the context structure, for use by the context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107)  *	restore functions. This allows us to save things we need across
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108)  *	power down states
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110) static void velocity_save_context(struct velocity_info *vptr, struct velocity_context *context)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112) 	struct mac_regs __iomem *regs = vptr->mac_regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113) 	u16 i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114) 	u8 __iomem *ptr = (u8 __iomem *)regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116) 	for (i = MAC_REG_PAR; i < MAC_REG_CR0_CLR; i += 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117) 		*((u32 *) (context->mac_reg + i)) = readl(ptr + i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119) 	for (i = MAC_REG_MAR; i < MAC_REG_TDCSR_CLR; i += 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120) 		*((u32 *) (context->mac_reg + i)) = readl(ptr + i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122) 	for (i = MAC_REG_RDBASE_LO; i < MAC_REG_FIFO_TEST0; i += 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123) 		*((u32 *) (context->mac_reg + i)) = readl(ptr + i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3127) static int velocity_suspend(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129) 	struct net_device *netdev = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130) 	struct velocity_info *vptr = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3133) 	if (!netif_running(vptr->netdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3134) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136) 	netif_device_detach(vptr->netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138) 	spin_lock_irqsave(&vptr->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139) 	if (vptr->pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140) 		pci_save_state(vptr->pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142) 	if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143) 		velocity_get_ip(vptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144) 		velocity_save_context(vptr, &vptr->context);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145) 		velocity_shutdown(vptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146) 		velocity_set_wol(vptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147) 		if (vptr->pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148) 			pci_enable_wake(vptr->pdev, PCI_D3hot, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149) 		velocity_set_power_state(vptr, PCI_D3hot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151) 		velocity_save_context(vptr, &vptr->context);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152) 		velocity_shutdown(vptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153) 		if (vptr->pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154) 			pci_disable_device(vptr->pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155) 		velocity_set_power_state(vptr, PCI_D3hot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158) 	spin_unlock_irqrestore(&vptr->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163)  *	velocity_restore_context	-	restore registers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164)  *	@vptr: velocity
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165)  *	@context: buffer for stored context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3167)  *	Reload the register configuration from the velocity context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168)  *	created by velocity_save_context.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170) static void velocity_restore_context(struct velocity_info *vptr, struct velocity_context *context)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172) 	struct mac_regs __iomem *regs = vptr->mac_regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174) 	u8 __iomem *ptr = (u8 __iomem *)regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176) 	for (i = MAC_REG_PAR; i < MAC_REG_CR0_SET; i += 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177) 		writel(*((u32 *) (context->mac_reg + i)), ptr + i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179) 	/* Just skip cr0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3180) 	for (i = MAC_REG_CR1_SET; i < MAC_REG_CR0_CLR; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181) 		/* Clear */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182) 		writeb(~(*((u8 *) (context->mac_reg + i))), ptr + i + 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3183) 		/* Set */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184) 		writeb(*((u8 *) (context->mac_reg + i)), ptr + i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3187) 	for (i = MAC_REG_MAR; i < MAC_REG_IMR; i += 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3188) 		writel(*((u32 *) (context->mac_reg + i)), ptr + i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3189) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3190) 	for (i = MAC_REG_RDBASE_LO; i < MAC_REG_FIFO_TEST0; i += 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191) 		writel(*((u32 *) (context->mac_reg + i)), ptr + i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3192) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3193) 	for (i = MAC_REG_TDCSR_SET; i <= MAC_REG_RDCSR_SET; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3194) 		writeb(*((u8 *) (context->mac_reg + i)), ptr + i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3195) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3196) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3197) static int velocity_resume(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3198) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3199) 	struct net_device *netdev = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3200) 	struct velocity_info *vptr = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3201) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3202) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3203) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3204) 	if (!netif_running(vptr->netdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3205) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3206) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3207) 	velocity_set_power_state(vptr, PCI_D0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3208) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3209) 	if (vptr->pdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3210) 		pci_enable_wake(vptr->pdev, PCI_D0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3211) 		pci_restore_state(vptr->pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3212) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3213) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3214) 	mac_wol_reset(vptr->mac_regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3215) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3216) 	spin_lock_irqsave(&vptr->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3217) 	velocity_restore_context(vptr, &vptr->context);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3218) 	velocity_init_registers(vptr, VELOCITY_INIT_WOL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3219) 	mac_disable_int(vptr->mac_regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3220) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3221) 	velocity_tx_srv(vptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3222) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3223) 	for (i = 0; i < vptr->tx.numq; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3224) 		if (vptr->tx.used[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3225) 			mac_tx_queue_wake(vptr->mac_regs, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3226) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3227) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3228) 	mac_enable_int(vptr->mac_regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3229) 	spin_unlock_irqrestore(&vptr->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230) 	netif_device_attach(vptr->netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3231) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3232) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3233) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3234) #endif	/* CONFIG_PM_SLEEP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3235) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3236) static SIMPLE_DEV_PM_OPS(velocity_pm_ops, velocity_suspend, velocity_resume);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3237) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3238) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3239)  *	Definition for our device driver. The PCI layer interface
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3240)  *	uses this to handle all our card discover and plugging
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3241)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3242) static struct pci_driver velocity_pci_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3243) 	.name		= VELOCITY_NAME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3244) 	.id_table	= velocity_pci_id_table,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3245) 	.probe		= velocity_pci_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3246) 	.remove		= velocity_pci_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3247) 	.driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3248) 		.pm = &velocity_pm_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3249) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3250) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3251) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3252) static struct platform_driver velocity_platform_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3253) 	.probe		= velocity_platform_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3254) 	.remove		= velocity_platform_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3255) 	.driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3256) 		.name = "via-velocity",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3257) 		.of_match_table = velocity_of_ids,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3258) 		.pm = &velocity_pm_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3259) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3260) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3261) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3262) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3263)  *	velocity_ethtool_up	-	pre hook for ethtool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3264)  *	@dev: network device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3265)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3266)  *	Called before an ethtool operation. We need to make sure the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3267)  *	chip is out of D3 state before we poke at it. In case of ethtool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3268)  *	ops nesting, only wake the device up in the outermost block.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3269)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3270) static int velocity_ethtool_up(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3271) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3272) 	struct velocity_info *vptr = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3273) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3274) 	if (vptr->ethtool_ops_nesting == U32_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3275) 		return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3276) 	if (!vptr->ethtool_ops_nesting++ && !netif_running(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3277) 		velocity_set_power_state(vptr, PCI_D0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3278) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3279) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3280) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3281) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3282)  *	velocity_ethtool_down	-	post hook for ethtool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3283)  *	@dev: network device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3284)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3285)  *	Called after an ethtool operation. Restore the chip back to D3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3286)  *	state if it isn't running. In case of ethtool ops nesting, only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3287)  *	put the device to sleep in the outermost block.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3288)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3289) static void velocity_ethtool_down(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3290) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3291) 	struct velocity_info *vptr = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3292) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3293) 	if (!--vptr->ethtool_ops_nesting && !netif_running(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3294) 		velocity_set_power_state(vptr, PCI_D3hot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3295) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3296) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3297) static int velocity_get_link_ksettings(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3298) 				       struct ethtool_link_ksettings *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3299) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3300) 	struct velocity_info *vptr = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3301) 	struct mac_regs __iomem *regs = vptr->mac_regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3302) 	u32 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3303) 	u32 supported, advertising;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3304) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3305) 	status = check_connection_type(vptr->mac_regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3306) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3307) 	supported = SUPPORTED_TP |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3308) 			SUPPORTED_Autoneg |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3309) 			SUPPORTED_10baseT_Half |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3310) 			SUPPORTED_10baseT_Full |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3311) 			SUPPORTED_100baseT_Half |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3312) 			SUPPORTED_100baseT_Full |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3313) 			SUPPORTED_1000baseT_Half |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3314) 			SUPPORTED_1000baseT_Full;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3315) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3316) 	advertising = ADVERTISED_TP | ADVERTISED_Autoneg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3317) 	if (vptr->options.spd_dpx == SPD_DPX_AUTO) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3318) 		advertising |=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3319) 			ADVERTISED_10baseT_Half |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3320) 			ADVERTISED_10baseT_Full |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3321) 			ADVERTISED_100baseT_Half |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3322) 			ADVERTISED_100baseT_Full |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3323) 			ADVERTISED_1000baseT_Half |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3324) 			ADVERTISED_1000baseT_Full;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3325) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3326) 		switch (vptr->options.spd_dpx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3327) 		case SPD_DPX_1000_FULL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3328) 			advertising |= ADVERTISED_1000baseT_Full;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3329) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3330) 		case SPD_DPX_100_HALF:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3331) 			advertising |= ADVERTISED_100baseT_Half;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3332) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3333) 		case SPD_DPX_100_FULL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3334) 			advertising |= ADVERTISED_100baseT_Full;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3335) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3336) 		case SPD_DPX_10_HALF:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3337) 			advertising |= ADVERTISED_10baseT_Half;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3338) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3339) 		case SPD_DPX_10_FULL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3340) 			advertising |= ADVERTISED_10baseT_Full;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3341) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3342) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3343) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3344) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3345) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3346) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3347) 	if (status & VELOCITY_SPEED_1000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3348) 		cmd->base.speed = SPEED_1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3349) 	else if (status & VELOCITY_SPEED_100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3350) 		cmd->base.speed = SPEED_100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3351) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3352) 		cmd->base.speed = SPEED_10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3353) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3354) 	cmd->base.autoneg = (status & VELOCITY_AUTONEG_ENABLE) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3355) 		AUTONEG_ENABLE : AUTONEG_DISABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3356) 	cmd->base.port = PORT_TP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3357) 	cmd->base.phy_address = readb(&regs->MIIADR) & 0x1F;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3358) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3359) 	if (status & VELOCITY_DUPLEX_FULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3360) 		cmd->base.duplex = DUPLEX_FULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3361) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3362) 		cmd->base.duplex = DUPLEX_HALF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3363) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3364) 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3365) 						supported);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3366) 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3367) 						advertising);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3368) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3369) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3370) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3371) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3372) static int velocity_set_link_ksettings(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3373) 				       const struct ethtool_link_ksettings *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3374) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3375) 	struct velocity_info *vptr = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3376) 	u32 speed = cmd->base.speed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3377) 	u32 curr_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3378) 	u32 new_status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3379) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3380) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3381) 	curr_status = check_connection_type(vptr->mac_regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3382) 	curr_status &= (~VELOCITY_LINK_FAIL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3383) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3384) 	new_status |= ((cmd->base.autoneg) ? VELOCITY_AUTONEG_ENABLE : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3385) 	new_status |= ((speed == SPEED_1000) ? VELOCITY_SPEED_1000 : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3386) 	new_status |= ((speed == SPEED_100) ? VELOCITY_SPEED_100 : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3387) 	new_status |= ((speed == SPEED_10) ? VELOCITY_SPEED_10 : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3388) 	new_status |= ((cmd->base.duplex == DUPLEX_FULL) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3389) 		       VELOCITY_DUPLEX_FULL : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3390) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3391) 	if ((new_status & VELOCITY_AUTONEG_ENABLE) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3392) 	    (new_status != (curr_status | VELOCITY_AUTONEG_ENABLE))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3393) 		ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3394) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3395) 		enum speed_opt spd_dpx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3396) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3397) 		if (new_status & VELOCITY_AUTONEG_ENABLE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3398) 			spd_dpx = SPD_DPX_AUTO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3399) 		else if ((new_status & VELOCITY_SPEED_1000) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3400) 			 (new_status & VELOCITY_DUPLEX_FULL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3401) 			spd_dpx = SPD_DPX_1000_FULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3402) 		} else if (new_status & VELOCITY_SPEED_100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3403) 			spd_dpx = (new_status & VELOCITY_DUPLEX_FULL) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3404) 				SPD_DPX_100_FULL : SPD_DPX_100_HALF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3405) 		else if (new_status & VELOCITY_SPEED_10)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3406) 			spd_dpx = (new_status & VELOCITY_DUPLEX_FULL) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3407) 				SPD_DPX_10_FULL : SPD_DPX_10_HALF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3408) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3409) 			return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3410) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3411) 		vptr->options.spd_dpx = spd_dpx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3412) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3413) 		velocity_set_media_mode(vptr, new_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3414) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3415) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3416) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3417) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3418) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3419) static void velocity_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3420) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3421) 	struct velocity_info *vptr = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3422) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3423) 	strlcpy(info->driver, VELOCITY_NAME, sizeof(info->driver));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3424) 	strlcpy(info->version, VELOCITY_VERSION, sizeof(info->version));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3425) 	if (vptr->pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3426) 		strlcpy(info->bus_info, pci_name(vptr->pdev),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3427) 						sizeof(info->bus_info));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3428) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3429) 		strlcpy(info->bus_info, "platform", sizeof(info->bus_info));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3430) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3431) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3432) static void velocity_ethtool_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3433) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3434) 	struct velocity_info *vptr = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3435) 	wol->supported = WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_ARP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3436) 	wol->wolopts |= WAKE_MAGIC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3437) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3438) 	   if (vptr->wol_opts & VELOCITY_WOL_PHY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3439) 		   wol.wolopts|=WAKE_PHY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3440) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3441) 	if (vptr->wol_opts & VELOCITY_WOL_UCAST)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3442) 		wol->wolopts |= WAKE_UCAST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3443) 	if (vptr->wol_opts & VELOCITY_WOL_ARP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3444) 		wol->wolopts |= WAKE_ARP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3445) 	memcpy(&wol->sopass, vptr->wol_passwd, 6);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3446) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3447) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3448) static int velocity_ethtool_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3449) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3450) 	struct velocity_info *vptr = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3451) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3452) 	if (!(wol->wolopts & (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_ARP)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3453) 		return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3454) 	vptr->wol_opts = VELOCITY_WOL_MAGIC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3455) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3456) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3457) 	   if (wol.wolopts & WAKE_PHY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3458) 	   vptr->wol_opts|=VELOCITY_WOL_PHY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3459) 	   vptr->flags |=VELOCITY_FLAGS_WOL_ENABLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3460) 	   }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3461) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3462) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3463) 	if (wol->wolopts & WAKE_MAGIC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3464) 		vptr->wol_opts |= VELOCITY_WOL_MAGIC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3465) 		vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3466) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3467) 	if (wol->wolopts & WAKE_UCAST) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3468) 		vptr->wol_opts |= VELOCITY_WOL_UCAST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3469) 		vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3470) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3471) 	if (wol->wolopts & WAKE_ARP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3472) 		vptr->wol_opts |= VELOCITY_WOL_ARP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3473) 		vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3474) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3475) 	memcpy(vptr->wol_passwd, wol->sopass, 6);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3476) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3477) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3478) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3479) static int get_pending_timer_val(int val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3480) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3481) 	int mult_bits = val >> 6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3482) 	int mult = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3483) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3484) 	switch (mult_bits)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3485) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3486) 	case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3487) 		mult = 4; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3488) 	case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3489) 		mult = 16; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3490) 	case 3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3491) 		mult = 64; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3492) 	case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3493) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3494) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3495) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3496) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3497) 	return (val & 0x3f) * mult;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3498) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3499) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3500) static void set_pending_timer_val(int *val, u32 us)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3501) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3502) 	u8 mult = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3503) 	u8 shift = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3504) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3505) 	if (us >= 0x3f) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3506) 		mult = 1; /* mult with 4 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3507) 		shift = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3508) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3509) 	if (us >= 0x3f * 4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3510) 		mult = 2; /* mult with 16 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3511) 		shift = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3512) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3513) 	if (us >= 0x3f * 16) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3514) 		mult = 3; /* mult with 64 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3515) 		shift = 6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3516) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3517) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3518) 	*val = (mult << 6) | ((us >> shift) & 0x3f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3519) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3520) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3521) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3522) static int velocity_get_coalesce(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3523) 		struct ethtool_coalesce *ecmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3524) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3525) 	struct velocity_info *vptr = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3526) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3527) 	ecmd->tx_max_coalesced_frames = vptr->options.tx_intsup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3528) 	ecmd->rx_max_coalesced_frames = vptr->options.rx_intsup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3529) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3530) 	ecmd->rx_coalesce_usecs = get_pending_timer_val(vptr->options.rxqueue_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3531) 	ecmd->tx_coalesce_usecs = get_pending_timer_val(vptr->options.txqueue_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3532) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3533) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3534) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3535) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3536) static int velocity_set_coalesce(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3537) 		struct ethtool_coalesce *ecmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3538) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3539) 	struct velocity_info *vptr = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3540) 	int max_us = 0x3f * 64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3541) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3542) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3543) 	/* 6 bits of  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3544) 	if (ecmd->tx_coalesce_usecs > max_us)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3545) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3546) 	if (ecmd->rx_coalesce_usecs > max_us)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3547) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3548) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3549) 	if (ecmd->tx_max_coalesced_frames > 0xff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3550) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3551) 	if (ecmd->rx_max_coalesced_frames > 0xff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3552) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3553) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3554) 	vptr->options.rx_intsup = ecmd->rx_max_coalesced_frames;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3555) 	vptr->options.tx_intsup = ecmd->tx_max_coalesced_frames;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3556) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3557) 	set_pending_timer_val(&vptr->options.rxqueue_timer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3558) 			ecmd->rx_coalesce_usecs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3559) 	set_pending_timer_val(&vptr->options.txqueue_timer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3560) 			ecmd->tx_coalesce_usecs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3561) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3562) 	/* Setup the interrupt suppression and queue timers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3563) 	spin_lock_irqsave(&vptr->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3564) 	mac_disable_int(vptr->mac_regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3565) 	setup_adaptive_interrupts(vptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3566) 	setup_queue_timers(vptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3567) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3568) 	mac_write_int_mask(vptr->int_mask, vptr->mac_regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3569) 	mac_clear_isr(vptr->mac_regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3570) 	mac_enable_int(vptr->mac_regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3571) 	spin_unlock_irqrestore(&vptr->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3572) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3573) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3574) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3575) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3576) static const char velocity_gstrings[][ETH_GSTRING_LEN] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3577) 	"rx_all",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3578) 	"rx_ok",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3579) 	"tx_ok",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3580) 	"rx_error",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3581) 	"rx_runt_ok",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3582) 	"rx_runt_err",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3583) 	"rx_64",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3584) 	"tx_64",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3585) 	"rx_65_to_127",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3586) 	"tx_65_to_127",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3587) 	"rx_128_to_255",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3588) 	"tx_128_to_255",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3589) 	"rx_256_to_511",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3590) 	"tx_256_to_511",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3591) 	"rx_512_to_1023",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3592) 	"tx_512_to_1023",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3593) 	"rx_1024_to_1518",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3594) 	"tx_1024_to_1518",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3595) 	"tx_ether_collisions",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3596) 	"rx_crc_errors",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3597) 	"rx_jumbo",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3598) 	"tx_jumbo",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3599) 	"rx_mac_control_frames",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3600) 	"tx_mac_control_frames",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3601) 	"rx_frame_alignment_errors",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3602) 	"rx_long_ok",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3603) 	"rx_long_err",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3604) 	"tx_sqe_errors",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3605) 	"rx_no_buf",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3606) 	"rx_symbol_errors",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3607) 	"in_range_length_errors",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3608) 	"late_collisions"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3609) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3610) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3611) static void velocity_get_strings(struct net_device *dev, u32 sset, u8 *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3612) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3613) 	switch (sset) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3614) 	case ETH_SS_STATS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3615) 		memcpy(data, *velocity_gstrings, sizeof(velocity_gstrings));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3616) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3617) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3618) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3619) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3620) static int velocity_get_sset_count(struct net_device *dev, int sset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3621) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3622) 	switch (sset) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3623) 	case ETH_SS_STATS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3624) 		return ARRAY_SIZE(velocity_gstrings);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3625) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3626) 		return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3627) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3628) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3629) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3630) static void velocity_get_ethtool_stats(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3631) 				       struct ethtool_stats *stats, u64 *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3632) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3633) 	if (netif_running(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3634) 		struct velocity_info *vptr = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3635) 		u32 *p = vptr->mib_counter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3636) 		int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3637) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3638) 		spin_lock_irq(&vptr->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3639) 		velocity_update_hw_mibs(vptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3640) 		spin_unlock_irq(&vptr->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3641) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3642) 		for (i = 0; i < ARRAY_SIZE(velocity_gstrings); i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3643) 			*data++ = *p++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3644) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3645) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3646) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3647) static const struct ethtool_ops velocity_ethtool_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3648) 	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3649) 				     ETHTOOL_COALESCE_MAX_FRAMES,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3650) 	.get_drvinfo		= velocity_get_drvinfo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3651) 	.get_wol		= velocity_ethtool_get_wol,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3652) 	.set_wol		= velocity_ethtool_set_wol,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3653) 	.get_link		= velocity_get_link,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3654) 	.get_strings		= velocity_get_strings,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3655) 	.get_sset_count		= velocity_get_sset_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3656) 	.get_ethtool_stats	= velocity_get_ethtool_stats,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3657) 	.get_coalesce		= velocity_get_coalesce,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3658) 	.set_coalesce		= velocity_set_coalesce,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3659) 	.begin			= velocity_ethtool_up,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3660) 	.complete		= velocity_ethtool_down,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3661) 	.get_link_ksettings	= velocity_get_link_ksettings,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3662) 	.set_link_ksettings	= velocity_set_link_ksettings,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3663) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3664) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3665) #if defined(CONFIG_PM) && defined(CONFIG_INET)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3666) static int velocity_netdev_event(struct notifier_block *nb, unsigned long notification, void *ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3667) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3668) 	struct in_ifaddr *ifa = ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3669) 	struct net_device *dev = ifa->ifa_dev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3670) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3671) 	if (dev_net(dev) == &init_net &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3672) 	    dev->netdev_ops == &velocity_netdev_ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3673) 		velocity_get_ip(netdev_priv(dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3674) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3675) 	return NOTIFY_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3676) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3677) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3678) static struct notifier_block velocity_inetaddr_notifier = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3679) 	.notifier_call	= velocity_netdev_event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3680) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3681) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3682) static void velocity_register_notifier(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3683) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3684) 	register_inetaddr_notifier(&velocity_inetaddr_notifier);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3685) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3686) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3687) static void velocity_unregister_notifier(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3688) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3689) 	unregister_inetaddr_notifier(&velocity_inetaddr_notifier);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3690) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3691) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3692) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3693) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3694) #define velocity_register_notifier()	do {} while (0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3695) #define velocity_unregister_notifier()	do {} while (0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3696) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3697) #endif	/* defined(CONFIG_PM) && defined(CONFIG_INET) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3698) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3699) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3700)  *	velocity_init_module	-	load time function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3701)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3702)  *	Called when the velocity module is loaded. The PCI driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3703)  *	is registered with the PCI layer, and in turn will call
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3704)  *	the probe functions for each velocity adapter installed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3705)  *	in the system.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3706)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3707) static int __init velocity_init_module(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3708) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3709) 	int ret_pci, ret_platform;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3710) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3711) 	velocity_register_notifier();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3712) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3713) 	ret_pci = pci_register_driver(&velocity_pci_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3714) 	ret_platform = platform_driver_register(&velocity_platform_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3715) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3716) 	/* if both_registers failed, remove the notifier */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3717) 	if ((ret_pci < 0) && (ret_platform < 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3718) 		velocity_unregister_notifier();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3719) 		return ret_pci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3720) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3721) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3722) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3723) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3724) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3725) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3726)  *	velocity_cleanup	-	module unload
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3727)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3728)  *	When the velocity hardware is unloaded this function is called.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3729)  *	It will clean up the notifiers and the unregister the PCI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3730)  *	driver interface for this hardware. This in turn cleans up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3731)  *	all discovered interfaces before returning from the function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3732)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3733) static void __exit velocity_cleanup_module(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3734) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3735) 	velocity_unregister_notifier();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3736) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3737) 	pci_unregister_driver(&velocity_pci_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3738) 	platform_driver_unregister(&velocity_platform_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3739) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3740) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3741) module_init(velocity_init_module);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3742) module_exit(velocity_cleanup_module);