// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * acenic.c: Linux driver for the Alteon AceNIC Gigabit Ethernet card
 *           and other Tigon based cards.
 *
 * Copyright 1998-2002 by Jes Sorensen, <jes@trained-monkey.org>.
 *
 * Thanks to Alteon and 3Com for providing hardware and documentation
 * enabling me to write this driver.
 *
 * A mailing list for discussing the use of this driver has been
 * set up; please subscribe to the list if you have any questions
 * about the driver. Send mail to linux-acenic-help@sunsite.auc.dk to
 * see how to subscribe.
 *
 * Additional credits:
 *   Pete Wyckoff <wyckoff@ca.sandia.gov>: Initial Linux/Alpha and trace
 *       dump support. The trace dump support has not been
 *       integrated yet however.
 *   Troy Benjegerdes: Big Endian (PPC) patches.
 *   Nate Stahl: Better out of memory handling and stats support.
 *   Aman Singla: Nasty race between interrupt handler and tx code dealing
 *       with 'testing the tx_ret_csm and setting tx_full'
 *   David S. Miller <davem@redhat.com>: conversion to new PCI dma mapping
 *       infrastructure and Sparc support
 *   Pierrick Pinasseau (CERN): For lending me an Ultra 5 to test the
 *       driver under Linux/Sparc64
 *   Matt Domsch <Matt_Domsch@dell.com>: Detect Alteon 1000baseT cards
 *       ETHTOOL_GDRVINFO support
 *   Chip Salzenberg <chip@valinux.com>: Fix race condition between tx
 *       handler and close() cleanup.
 *   Ken Aaker <kdaaker@rchland.vnet.ibm.com>: Correct check for whether
 *       memory mapped IO is enabled to
 *       make the driver work on RS/6000.
 *   Takayoshi Kouchi <kouchi@hpc.bs1.fc.nec.co.jp>: Identifying problem
 *       where the driver would disable
 *       bus master mode if it had to disable
 *       write and invalidate.
 *   Stephen Hack <stephen_hack@hp.com>: Fixed ace_set_mac_addr for little
 *       endian systems.
 *   Val Henson <vhenson@esscom.com>: Reset Jumbo skb producer and
 *       rx producer index when
 *       flushing the Jumbo ring.
 *   Hans Grobler <grobh@sun.ac.za>: Memory leak fixes in the
 *       driver init path.
 *   Grant Grundler <grundler@cup.hp.com>: PCI write posting fixes.
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/sockios.h>
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/prefetch.h>
#include <linux/if_vlan.h>

#ifdef SIOCETHTOOL
#include <linux/ethtool.h>
#endif

#include <net/sock.h>
#include <net/ip.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>


#define DRV_NAME		"acenic"

#undef INDEX_DEBUG

#ifdef CONFIG_ACENIC_OMIT_TIGON_I
#define ACE_IS_TIGON_I(ap)	0
#define ACE_TX_RING_ENTRIES(ap)	MAX_TX_RING_ENTRIES
#else
#define ACE_IS_TIGON_I(ap)	(ap->version == 1)
#define ACE_TX_RING_ENTRIES(ap)	ap->tx_ring_entries
#endif
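
/*
 * Note that with CONFIG_ACENIC_OMIT_TIGON_I both macros above collapse
 * to compile-time constants, so every ACE_IS_TIGON_I() branch below
 * becomes dead code and the TX ring size folds to a constant.
 */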

#ifndef PCI_VENDOR_ID_ALTEON
#define PCI_VENDOR_ID_ALTEON		0x12ae
#endif
#ifndef PCI_DEVICE_ID_ALTEON_ACENIC_FIBRE
#define PCI_DEVICE_ID_ALTEON_ACENIC_FIBRE  0x0001
#define PCI_DEVICE_ID_ALTEON_ACENIC_COPPER 0x0002
#endif
#ifndef PCI_DEVICE_ID_3COM_3C985
#define PCI_DEVICE_ID_3COM_3C985	0x0001
#endif
#ifndef PCI_VENDOR_ID_NETGEAR
#define PCI_VENDOR_ID_NETGEAR		0x1385
#define PCI_DEVICE_ID_NETGEAR_GA620	0x620a
#endif
#ifndef PCI_DEVICE_ID_NETGEAR_GA620T
#define PCI_DEVICE_ID_NETGEAR_GA620T	0x630a
#endif


/*
 * Farallon used the DEC vendor ID by mistake and they seem not
 * to care - stinky!
 */
#ifndef PCI_DEVICE_ID_FARALLON_PN9000SX
#define PCI_DEVICE_ID_FARALLON_PN9000SX	0x1a
#endif
#ifndef PCI_DEVICE_ID_FARALLON_PN9100T
#define PCI_DEVICE_ID_FARALLON_PN9100T	0xfa
#endif
#ifndef PCI_VENDOR_ID_SGI
#define PCI_VENDOR_ID_SGI		0x10a9
#endif
#ifndef PCI_DEVICE_ID_SGI_ACENIC
#define PCI_DEVICE_ID_SGI_ACENIC	0x0009
#endif

static const struct pci_device_id acenic_pci_tbl[] = {
	{ PCI_VENDOR_ID_ALTEON, PCI_DEVICE_ID_ALTEON_ACENIC_FIBRE,
	  PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
	{ PCI_VENDOR_ID_ALTEON, PCI_DEVICE_ID_ALTEON_ACENIC_COPPER,
	  PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3C985,
	  PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
	{ PCI_VENDOR_ID_NETGEAR, PCI_DEVICE_ID_NETGEAR_GA620,
	  PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
	{ PCI_VENDOR_ID_NETGEAR, PCI_DEVICE_ID_NETGEAR_GA620T,
	  PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
	/*
	 * Farallon used the DEC vendor ID on their cards incorrectly,
	 * then later Alteon's ID.
	 */
	{ PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_FARALLON_PN9000SX,
	  PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
	{ PCI_VENDOR_ID_ALTEON, PCI_DEVICE_ID_FARALLON_PN9100T,
	  PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
	{ PCI_VENDOR_ID_SGI, PCI_DEVICE_ID_SGI_ACENIC,
	  PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
	{ }
};
MODULE_DEVICE_TABLE(pci, acenic_pci_tbl);

#define ace_sync_irq(irq)	synchronize_irq(irq)

#ifndef offset_in_page
#define offset_in_page(ptr)	((unsigned long)(ptr) & ~PAGE_MASK)
#endif

#define ACE_MAX_MOD_PARMS	8
#define BOARD_IDX_STATIC	0
#define BOARD_IDX_OVERFLOW	-1
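
/*
 * BOARD_IDX_STATIC is used for built-in (non-modular) builds, where no
 * per-board module parameters exist; BOARD_IDX_OVERFLOW marks boards
 * beyond ACE_MAX_MOD_PARMS, for which no per-board parameter slots are
 * available (see the board_idx assignment in acenic_probe_one() below).
 */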

#include "acenic.h"

/*
 * These must be defined before the firmware is included.
 */
#define MAX_TEXT_LEN	96*1024
#define MAX_RODATA_LEN	8*1024
#define MAX_DATA_LEN	2*1024

#ifndef tigon2FwReleaseLocal
#define tigon2FwReleaseLocal 0
#endif

/*
 * This driver currently supports Tigon I and Tigon II based cards
 * including the Alteon AceNIC, the 3Com 3C985[B] and NetGear
 * GA620. The driver should also work on the SGI, DEC and Farallon
 * versions of the card, however I have not been able to test that
 * myself.
 *
 * This card is really neat, it supports receive hardware checksumming
 * and jumbo frames (up to 9000 bytes) and does a lot of work in the
 * firmware. Also the programming interface is quite neat, except for
 * the parts dealing with the i2c eeprom on the card ;-)
 *
 * Using jumbo frames:
 *
 * To enable jumbo frames, simply specify an MTU between 1500 and 9000
 * bytes to ifconfig. Jumbo frames can be enabled or disabled at any time
 * by running `ifconfig eth<X> mtu <MTU>' with <X> being the Ethernet
 * interface number and <MTU> being the MTU value.
 *
 * Module parameters:
 *
 * When compiled as a loadable module, the driver allows for a number
 * of module parameters to be specified. The driver supports the
 * following module parameters:
 *
 * trace=<val> - Firmware trace level. This requires special traced
 *               firmware to replace the firmware supplied with
 *               the driver - for debugging purposes only.
 *
 * link=<val>  - Link state. Normally you want to use the default link
 *               parameters set by the driver. This can be used to
 *               override these in case your switch doesn't negotiate
 *               the link properly. Valid values are:
 *        0x0001 - Force half duplex link.
 *        0x0002 - Do not negotiate line speed with the other end.
 *        0x0010 - 10Mbit/sec link.
 *        0x0020 - 100Mbit/sec link.
 *        0x0040 - 1000Mbit/sec link.
 *        0x0100 - Do not negotiate flow control.
 *        0x0200 - Enable RX flow control Y
 *        0x0400 - Enable TX flow control Y (Tigon II NICs only).
 *               The default value is 0x0270, i.e. negotiate link and
 *               flow control, advertising the highest possible link
 *               speed, with RX flow control enabled.
 *
 *               When disabling link speed negotiation, only one link
 *               speed is allowed to be specified!
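 *
 *               As a worked example (an illustrative value, not from
 *               the original documentation): link=0x0222 combines
 *               0x0002 (no speed negotiation), 0x0020 (100Mbit/sec)
 *               and 0x0200 (RX flow control), i.e. it forces a
 *               100Mbit/sec link with RX flow control enabled.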
 *
 * tx_coal_tick=<val> - number of coalescing clock ticks (us) allowed
 *               to wait for more packets to arrive before
 *               interrupting the host, from the time the first
 *               packet arrives.
 *
 * rx_coal_tick=<val> - number of coalescing clock ticks (us) allowed
 *               to wait for more packets to arrive in the receive ring,
 *               before interrupting the host, after receiving the
 *               first packet in the ring.
 *
 * max_tx_desc=<val> - maximum number of transmit descriptors
 *               (packets) transmitted before interrupting the host.
 *
 * max_rx_desc=<val> - maximum number of receive descriptors
 *               (packets) received before interrupting the host.
 *
 * tx_ratio=<val> - 6 bit value (0 - 63) specifying the split in 64th
 *               increments of the NIC's on board memory to be used for
 *               transmit and receive buffers. For the 1MB NIC approx.
 *               800KB is available, on the 1/2MB NIC approx. 300KB is
 *               available. 68KB will always be available as a minimum
 *               for both directions. The default value is a 50/50 split.
 * dis_pci_mem_inval=<val> - disable PCI memory write and invalidate
 *               operations, default (1) is to always disable this as
 *               that is what Alteon does on NT. I have not been able
 *               to measure any real performance differences with
 *               this on my systems. Set <val>=0 if you want to
 *               enable these operations.
 *
 * If you use more than one NIC, specify the parameters for the
 * individual NICs with a comma, e.g. trace=0,0x00001fff,0 if you want
 * to run tracing on NIC #2 but not on NIC #1 and #3.
 *
 * TODO:
 *
 * - Proper multicast support.
 * - NIC dump support.
 * - More tuning parameters.
 *
 * The mini ring is not used under Linux and I am not sure it makes sense
 * to actually use it.
 *
 * New interrupt handler strategy:
 *
 * The old interrupt handler worked using the traditional method of
 * replacing an skbuff with a new one when a packet arrives. However
 * the rx rings do not need to contain a static number of buffer
 * descriptors, thus it makes sense to move the memory allocation out
 * of the main interrupt handler and do it in a bottom half handler
 * and only allocate new buffers when the number of buffers in the
 * ring is below a certain threshold. In order to avoid starving the
 * NIC under heavy load it is however necessary to force allocation
 * when hitting a minimum threshold. The strategy for allocation is as
 * follows:
 *
 * RX_LOW_BUF_THRES    - allocate buffers in the bottom half
 * RX_PANIC_LOW_THRES  - we are very low on buffers, allocate
 *                       the buffers in the interrupt handler
 * RX_RING_THRES       - maximum number of buffers in the rx ring
 * RX_MINI_THRES       - maximum number of buffers in the mini ring
 * RX_JUMBO_THRES      - maximum number of buffers in the jumbo ring
 *
 * One advantageous side effect of this allocation approach is that the
 * entire rx processing can be done without holding any spin lock
 * since the rx rings and registers are totally independent of the tx
 * ring and its registers. This of course includes the kmalloc's of
 * new skb's. Thus start_xmit can run in parallel with rx processing
 * and the memory allocation on SMP systems.
 *
 * Note that running the skb reallocation in a bottom half opens up
 * another can of races which need to be handled properly. In
 * particular it can happen that the interrupt handler tries to run
 * the reallocation while the bottom half is either running on another
 * CPU or was interrupted on the same CPU. To get around this the
 * driver uses bitops to prevent the reallocation routines from being
 * reentered.
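 *
 * A minimal sketch of that bitops guard (the flag and function names
 * here are illustrative only, not necessarily the ones used further
 * down in this driver):
 *
 *	if (!test_and_set_bit(0, &ap->std_refill_busy)) {
 *		ace_refill_rx_std(dev, nr_bufs);
 *		clear_bit(0, &ap->std_refill_busy);
 *	}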
 *
 * TX handling can also be done without holding any spin lock, wheee
 * this is fun! since tx_ret_csm is only written to by the interrupt
 * handler. The case to be aware of is when shutting down the device
 * and cleaning up where it is necessary to make sure that
 * start_xmit() is not running while this is happening. Well DaveM
 * informs me that this case is already protected against ... bye bye
 * Mr. Spin Lock, it was nice to know you.
 *
 * TX interrupts are now partly disabled so the NIC will only generate
 * TX interrupts for the number of coal ticks, not for the number of
 * TX packets in the queue. This should reduce the number of TX-only
 * interrupts, i.e. interrupts seen when no RX processing is done.
 */

/*
 * Threshold values for RX buffer allocation - the low water marks for
 * when to start refilling the rings are set to 75% of the ring
 * sizes. It seems to make sense to refill the rings entirely from the
 * interrupt handler once it gets below the panic threshold, that way
 * we don't risk that the refilling is moved to another CPU when the
 * one running the interrupt handler just got the slab code hot in its
 * cache.
 */
#define RX_RING_SIZE		72
#define RX_MINI_SIZE		64
#define RX_JUMBO_SIZE		48

#define RX_PANIC_STD_THRES	16
#define RX_PANIC_STD_REFILL	(3*RX_PANIC_STD_THRES)/2
#define RX_LOW_STD_THRES	(3*RX_RING_SIZE)/4
#define RX_PANIC_MINI_THRES	12
#define RX_PANIC_MINI_REFILL	(3*RX_PANIC_MINI_THRES)/2
#define RX_LOW_MINI_THRES	(3*RX_MINI_SIZE)/4
#define RX_PANIC_JUMBO_THRES	6
#define RX_PANIC_JUMBO_REFILL	(3*RX_PANIC_JUMBO_THRES)/2
#define RX_LOW_JUMBO_THRES	(3*RX_JUMBO_SIZE)/4
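
/*
 * Worked out: the standard ring is refilled from the bottom half once
 * it drops below (3*72)/4 = 54 buffers, the mini ring below
 * (3*64)/4 = 48 and the jumbo ring below (3*48)/4 = 36; the panic
 * refill counts are 1.5x their thresholds, i.e. 24, 18 and 9
 * respectively.
 */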


/*
 * Size of the mini ring entries, basically these just should be big
 * enough to take TCP ACKs
 */
#define ACE_MINI_SIZE		100

#define ACE_MINI_BUFSIZE	ACE_MINI_SIZE
#define ACE_STD_BUFSIZE		(ACE_STD_MTU + ETH_HLEN + 4)
#define ACE_JUMBO_BUFSIZE	(ACE_JUMBO_MTU + ETH_HLEN + 4)
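
/*
 * The "+ 4" leaves room for a VLAN tag on top of the Ethernet header;
 * assuming the usual ACE_STD_MTU of 1500 and ACE_JUMBO_MTU of 9000
 * from acenic.h, this works out to 1518 and 9018 byte buffers.
 */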

/*
 * There seems to be a magic difference in the effect between 995 and 996
 * but little difference between 900 and 995 ... no idea why.
 *
 * There is now a default set of tuning parameters which is set, depending
 * on whether or not the user enables Jumbo frames. It's assumed that if
 * Jumbo frames are enabled, the user wants optimal tuning for that case.
 */
#define DEF_TX_COAL		400 /* 996 */
#define DEF_TX_MAX_DESC		60  /* was 40 */
#define DEF_RX_COAL		120 /* 1000 */
#define DEF_RX_MAX_DESC		25
#define DEF_TX_RATIO		21 /* 24 */

#define DEF_JUMBO_TX_COAL	20
#define DEF_JUMBO_TX_MAX_DESC	60
#define DEF_JUMBO_RX_COAL	30
#define DEF_JUMBO_RX_MAX_DESC	6
#define DEF_JUMBO_TX_RATIO	21

#if tigon2FwReleaseLocal < 20001118
/*
 * Standard firmware and early modifications duplicate
 * IRQ load without this flag (coal timer is never reset).
 * Note that with this flag tx_coal should be less than
 * time to xmit full tx ring.
 * 400usec is not so bad for tx ring size of 128.
 */
#define TX_COAL_INTS_ONLY	1	/* worth it */
#else
/*
 * With modified firmware, this is not necessary, but still useful.
 */
#define TX_COAL_INTS_ONLY	1
#endif

#define DEF_TRACE		0
#define DEF_STAT		(2 * TICKS_PER_SEC)


static int link_state[ACE_MAX_MOD_PARMS];
static int trace[ACE_MAX_MOD_PARMS];
static int tx_coal_tick[ACE_MAX_MOD_PARMS];
static int rx_coal_tick[ACE_MAX_MOD_PARMS];
static int max_tx_desc[ACE_MAX_MOD_PARMS];
static int max_rx_desc[ACE_MAX_MOD_PARMS];
static int tx_ratio[ACE_MAX_MOD_PARMS];
static int dis_pci_mem_inval[ACE_MAX_MOD_PARMS] = {1, 1, 1, 1, 1, 1, 1, 1};

MODULE_AUTHOR("Jes Sorensen <jes@trained-monkey.org>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("AceNIC/3C985/GA620 Gigabit Ethernet driver");
#ifndef CONFIG_ACENIC_OMIT_TIGON_I
MODULE_FIRMWARE("acenic/tg1.bin");
#endif
MODULE_FIRMWARE("acenic/tg2.bin");

module_param_array_named(link, link_state, int, NULL, 0);
module_param_array(trace, int, NULL, 0);
module_param_array(tx_coal_tick, int, NULL, 0);
module_param_array(max_tx_desc, int, NULL, 0);
module_param_array(rx_coal_tick, int, NULL, 0);
module_param_array(max_rx_desc, int, NULL, 0);
module_param_array(tx_ratio, int, NULL, 0);
MODULE_PARM_DESC(link, "AceNIC/3C985/NetGear link state");
MODULE_PARM_DESC(trace, "AceNIC/3C985/NetGear firmware trace level");
MODULE_PARM_DESC(tx_coal_tick, "AceNIC/3C985/GA620 max clock ticks to wait after the first tx descriptor arrives");
MODULE_PARM_DESC(max_tx_desc, "AceNIC/3C985/GA620 max number of transmit descriptors to wait for");
MODULE_PARM_DESC(rx_coal_tick, "AceNIC/3C985/GA620 max clock ticks to wait after the first rx descriptor arrives");
MODULE_PARM_DESC(max_rx_desc, "AceNIC/3C985/GA620 max number of receive descriptors to wait for");
MODULE_PARM_DESC(tx_ratio, "AceNIC/3C985/GA620 ratio of NIC memory used for TX/RX descriptors (range 0-63)");


static const char version[] =
  "acenic.c: v0.92 08/05/2002  Jes Sorensen, linux-acenic@SunSITE.dk\n"
  "                            http://home.cern.ch/~jes/gige/acenic.html\n";

static int ace_get_link_ksettings(struct net_device *,
				  struct ethtool_link_ksettings *);
static int ace_set_link_ksettings(struct net_device *,
				  const struct ethtool_link_ksettings *);
static void ace_get_drvinfo(struct net_device *, struct ethtool_drvinfo *);

static const struct ethtool_ops ace_ethtool_ops = {
	.get_drvinfo = ace_get_drvinfo,
	.get_link_ksettings = ace_get_link_ksettings,
	.set_link_ksettings = ace_set_link_ksettings,
};

static void ace_watchdog(struct net_device *dev, unsigned int txqueue);

static const struct net_device_ops ace_netdev_ops = {
	.ndo_open		= ace_open,
	.ndo_stop		= ace_close,
	.ndo_tx_timeout		= ace_watchdog,
	.ndo_get_stats		= ace_get_stats,
	.ndo_start_xmit		= ace_start_xmit,
	.ndo_set_rx_mode	= ace_set_multicast_list,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= ace_set_mac_addr,
	.ndo_change_mtu		= ace_change_mtu,
};

static int acenic_probe_one(struct pci_dev *pdev,
			    const struct pci_device_id *id)
{
	struct net_device *dev;
	struct ace_private *ap;
	static int boards_found;

	dev = alloc_etherdev(sizeof(struct ace_private));
	if (dev == NULL)
		return -ENOMEM;

	SET_NETDEV_DEV(dev, &pdev->dev);

	ap = netdev_priv(dev);
	ap->ndev = dev;
	ap->pdev = pdev;
	ap->name = pci_name(pdev);

	dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
	dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;

	dev->watchdog_timeo = 5*HZ;
	dev->min_mtu = 0;
	dev->max_mtu = ACE_JUMBO_MTU;

	dev->netdev_ops = &ace_netdev_ops;
	dev->ethtool_ops = &ace_ethtool_ops;

	/* we only display this string ONCE */
	if (!boards_found)
		printk(version);

	if (pci_enable_device(pdev))
		goto fail_free_netdev;

	/*
	 * Enable master mode before we start playing with the
	 * pci_command word since pci_set_master() will modify
	 * it.
	 */
	pci_set_master(pdev);

	pci_read_config_word(pdev, PCI_COMMAND, &ap->pci_command);

	/* OpenFirmware on Macs does not set this - DOH.. */
	if (!(ap->pci_command & PCI_COMMAND_MEMORY)) {
		printk(KERN_INFO "%s: Enabling PCI Memory Mapped "
		       "access - was not enabled by BIOS/Firmware\n",
		       ap->name);
		ap->pci_command = ap->pci_command | PCI_COMMAND_MEMORY;
		pci_write_config_word(ap->pdev, PCI_COMMAND,
				      ap->pci_command);
		wmb();
	}

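	/*
	 * A very low PCI latency timer can cut the NIC's bus-master
	 * bursts short, so enforce a sane minimum of 0x40.
	 */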
	pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &ap->pci_latency);
	if (ap->pci_latency <= 0x40) {
		ap->pci_latency = 0x40;
		pci_write_config_byte(pdev, PCI_LATENCY_TIMER, ap->pci_latency);
	}

	/*
	 * Remap the regs into kernel space - this is abuse of
	 * dev->base_addr since it was meant for I/O port
	 * addresses but who gives a damn.
	 */
	dev->base_addr = pci_resource_start(pdev, 0);
	ap->regs = ioremap(dev->base_addr, 0x4000);
	if (!ap->regs) {
		printk(KERN_ERR "%s:  Unable to map I/O register, "
		       "AceNIC %i will be disabled.\n",
		       ap->name, boards_found);
		goto fail_free_netdev;
	}

	switch(pdev->vendor) {
	case PCI_VENDOR_ID_ALTEON:
		if (pdev->device == PCI_DEVICE_ID_FARALLON_PN9100T) {
			printk(KERN_INFO "%s: Farallon PN9100-T ",
			       ap->name);
		} else {
			printk(KERN_INFO "%s: Alteon AceNIC ",
			       ap->name);
		}
		break;
	case PCI_VENDOR_ID_3COM:
		printk(KERN_INFO "%s: 3Com 3C985 ", ap->name);
		break;
	case PCI_VENDOR_ID_NETGEAR:
		printk(KERN_INFO "%s: NetGear GA620 ", ap->name);
		break;
	case PCI_VENDOR_ID_DEC:
		if (pdev->device == PCI_DEVICE_ID_FARALLON_PN9000SX) {
			printk(KERN_INFO "%s: Farallon PN9000-SX ",
			       ap->name);
			break;
		}
		fallthrough;
	case PCI_VENDOR_ID_SGI:
		printk(KERN_INFO "%s: SGI AceNIC ", ap->name);
		break;
	default:
		printk(KERN_INFO "%s: Unknown AceNIC ", ap->name);
		break;
	}

	printk("Gigabit Ethernet at 0x%08lx, ", dev->base_addr);
	printk("irq %d\n", pdev->irq);

#ifdef CONFIG_ACENIC_OMIT_TIGON_I
	if ((readl(&ap->regs->HostCtrl) >> 28) == 4) {
		printk(KERN_ERR "%s: Driver compiled without Tigon I"
		       " support - NIC disabled\n", dev->name);
		goto fail_uninit;
	}
#endif

	if (ace_allocate_descriptors(dev))
		goto fail_free_netdev;

#ifdef MODULE
	if (boards_found >= ACE_MAX_MOD_PARMS)
		ap->board_idx = BOARD_IDX_OVERFLOW;
	else
		ap->board_idx = boards_found;
#else
	ap->board_idx = BOARD_IDX_STATIC;
#endif

	if (ace_init(dev))
		goto fail_free_netdev;

	if (register_netdev(dev)) {
		printk(KERN_ERR "acenic: device registration failed\n");
		goto fail_uninit;
	}
	ap->name = dev->name;

	if (ap->pci_using_dac)
		dev->features |= NETIF_F_HIGHDMA;

	pci_set_drvdata(pdev, dev);

	boards_found++;
	return 0;

 fail_uninit:
	ace_init_cleanup(dev);
 fail_free_netdev:
	free_netdev(dev);
	return -ENODEV;
}

static void acenic_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct ace_private *ap = netdev_priv(dev);
	struct ace_regs __iomem *regs = ap->regs;
	short i;

	unregister_netdev(dev);
	writel(readl(&regs->CpuCtrl) | CPU_HALT, &regs->CpuCtrl);
	if (ap->version >= 2)
		writel(readl(&regs->CpuBCtrl) | CPU_HALT, &regs->CpuBCtrl);

	/*
	 * This clears any pending interrupts
	 */
	writel(1, &regs->Mb0Lo);
	readl(&regs->CpuCtrl);	/* flush */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) * Make sure no other CPUs are processing interrupts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) * on the card before the buffers are being released.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) * Otherwise one might experience some `interesting'
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) * effects.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) * Then release the RX buffers - jumbo buffers were
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) * already released in ace_close().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) ace_sync_irq(dev->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) for (i = 0; i < RX_STD_RING_ENTRIES; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) struct sk_buff *skb = ap->skb->rx_std_skbuff[i].skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) if (skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) struct ring_info *ringp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) dma_addr_t mapping;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) ringp = &ap->skb->rx_std_skbuff[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) mapping = dma_unmap_addr(ringp, mapping);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) dma_unmap_page(&ap->pdev->dev, mapping,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) ACE_STD_BUFSIZE, DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) ap->rx_std_ring[i].size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) ap->skb->rx_std_skbuff[i].skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) dev_kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) if (ap->version >= 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) for (i = 0; i < RX_MINI_RING_ENTRIES; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) struct sk_buff *skb = ap->skb->rx_mini_skbuff[i].skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) if (skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) struct ring_info *ringp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) dma_addr_t mapping;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) ringp = &ap->skb->rx_mini_skbuff[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) mapping = dma_unmap_addr(ringp,mapping);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) dma_unmap_page(&ap->pdev->dev, mapping,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) ACE_MINI_BUFSIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) ap->rx_mini_ring[i].size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) ap->skb->rx_mini_skbuff[i].skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) dev_kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) for (i = 0; i < RX_JUMBO_RING_ENTRIES; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) struct sk_buff *skb = ap->skb->rx_jumbo_skbuff[i].skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) if (skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) struct ring_info *ringp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) dma_addr_t mapping;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) ringp = &ap->skb->rx_jumbo_skbuff[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) mapping = dma_unmap_addr(ringp, mapping);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) dma_unmap_page(&ap->pdev->dev, mapping,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) ACE_JUMBO_BUFSIZE, DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) ap->rx_jumbo_ring[i].size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) ap->skb->rx_jumbo_skbuff[i].skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) dev_kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) ace_init_cleanup(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) free_netdev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) static struct pci_driver acenic_pci_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) .name = "acenic",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) .id_table = acenic_pci_tbl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) .probe = acenic_probe_one,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) .remove = acenic_remove_one,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) static void ace_free_descriptors(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) struct ace_private *ap = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) int size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) if (ap->rx_std_ring != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) size = (sizeof(struct rx_desc) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) (RX_STD_RING_ENTRIES +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) RX_JUMBO_RING_ENTRIES +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) RX_MINI_RING_ENTRIES +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) RX_RETURN_RING_ENTRIES));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) dma_free_coherent(&ap->pdev->dev, size, ap->rx_std_ring,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) ap->rx_ring_base_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) ap->rx_std_ring = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) ap->rx_jumbo_ring = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) ap->rx_mini_ring = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) ap->rx_return_ring = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) if (ap->evt_ring != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) size = (sizeof(struct event) * EVT_RING_ENTRIES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) dma_free_coherent(&ap->pdev->dev, size, ap->evt_ring,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) ap->evt_ring_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) ap->evt_ring = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) if (ap->tx_ring != NULL && !ACE_IS_TIGON_I(ap)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) size = (sizeof(struct tx_desc) * MAX_TX_RING_ENTRIES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) dma_free_coherent(&ap->pdev->dev, size, ap->tx_ring,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) ap->tx_ring_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) ap->tx_ring = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) if (ap->evt_prd != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) dma_free_coherent(&ap->pdev->dev, sizeof(u32),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) (void *)ap->evt_prd, ap->evt_prd_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) ap->evt_prd = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) if (ap->rx_ret_prd != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) dma_free_coherent(&ap->pdev->dev, sizeof(u32),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) (void *)ap->rx_ret_prd, ap->rx_ret_prd_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) ap->rx_ret_prd = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) if (ap->tx_csm != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) dma_free_coherent(&ap->pdev->dev, sizeof(u32),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) (void *)ap->tx_csm, ap->tx_csm_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) ap->tx_csm = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) static int ace_allocate_descriptors(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) struct ace_private *ap = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) int size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) size = (sizeof(struct rx_desc) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) (RX_STD_RING_ENTRIES +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) RX_JUMBO_RING_ENTRIES +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) RX_MINI_RING_ENTRIES +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) RX_RETURN_RING_ENTRIES));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763)
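^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) * The std, jumbo, mini and return RX rings share this single
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) * coherent block; the sub-ring pointers set up below are just
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) * fixed offsets from rx_std_ring.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) */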
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) ap->rx_std_ring = dma_alloc_coherent(&ap->pdev->dev, size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) &ap->rx_ring_base_dma, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) if (ap->rx_std_ring == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) ap->rx_jumbo_ring = ap->rx_std_ring + RX_STD_RING_ENTRIES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) ap->rx_mini_ring = ap->rx_jumbo_ring + RX_JUMBO_RING_ENTRIES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) ap->rx_return_ring = ap->rx_mini_ring + RX_MINI_RING_ENTRIES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) size = (sizeof(struct event) * EVT_RING_ENTRIES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) ap->evt_ring = dma_alloc_coherent(&ap->pdev->dev, size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) &ap->evt_ring_dma, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) if (ap->evt_ring == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) * Only allocate a host TX ring for the Tigon II; the Tigon I
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) * has to use PCI registers for this ;-(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) if (!ACE_IS_TIGON_I(ap)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) size = (sizeof(struct tx_desc) * MAX_TX_RING_ENTRIES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) ap->tx_ring = dma_alloc_coherent(&ap->pdev->dev, size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) &ap->tx_ring_dma, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) if (ap->tx_ring == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) ap->evt_prd = dma_alloc_coherent(&ap->pdev->dev, sizeof(u32),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) &ap->evt_prd_dma, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) if (ap->evt_prd == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) ap->rx_ret_prd = dma_alloc_coherent(&ap->pdev->dev, sizeof(u32),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) &ap->rx_ret_prd_dma, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) if (ap->rx_ret_prd == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) ap->tx_csm = dma_alloc_coherent(&ap->pdev->dev, sizeof(u32),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) &ap->tx_csm_dma, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) if (ap->tx_csm == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) /* Clean up. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) ace_init_cleanup(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) * Generic cleanup handling for data allocated during init. Used when the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) * module is unloaded or if an error occurs during initialization.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) static void ace_init_cleanup(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) struct ace_private *ap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) ap = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) ace_free_descriptors(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) if (ap->info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) dma_free_coherent(&ap->pdev->dev, sizeof(struct ace_info),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) ap->info, ap->info_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) kfree(ap->skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) kfree(ap->trace_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) if (dev->irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) free_irq(dev->irq, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) iounmap(ap->regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) * Commands are considered to be slow.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) static inline void ace_issue_cmd(struct ace_regs __iomem *regs, struct cmd *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) u32 idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850)
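^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) * Simple producer ring in register space: store the command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) * word at the current producer index, advance the index modulo
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) * the ring size and publish it for the firmware to consume.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) */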
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) idx = readl(&regs->CmdPrd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) writel(*(u32 *)(cmd), &regs->CmdRng[idx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) idx = (idx + 1) % CMD_RING_ENTRIES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) writel(idx, &regs->CmdPrd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) static int ace_init(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) struct ace_private *ap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) struct ace_regs __iomem *regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) struct ace_info *info = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) struct pci_dev *pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) unsigned long myjif;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) u64 tmp_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) u32 tig_ver, mac1, mac2, tmp, pci_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) int board_idx, ecode = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) short i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) unsigned char cache_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) ap = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) regs = ap->regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) board_idx = ap->board_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) * aman@sgi.com - it's useful to do a NIC reset here to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) * address the `Firmware not running' problem subsequent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) * to any crashes involving the NIC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) writel(HW_RESET | (HW_RESET << 24), &regs->HostCtrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) readl(&regs->HostCtrl); /* PCI write posting */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) udelay(5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) * Don't access any other registers before this point!
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) #ifdef __BIG_ENDIAN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) * This will most likely need BYTE_SWAP once we switch
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) * to using __raw_writel()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) writel((WORD_SWAP | CLR_INT | ((WORD_SWAP | CLR_INT) << 24)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) &regs->HostCtrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) writel((CLR_INT | WORD_SWAP | ((CLR_INT | WORD_SWAP) << 24)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) &regs->HostCtrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) readl(&regs->HostCtrl); /* PCI write posting */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) * Stop the NIC CPU and clear pending interrupts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) writel(readl(&regs->CpuCtrl) | CPU_HALT, &regs->CpuCtrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) readl(&regs->CpuCtrl); /* PCI write posting */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) writel(0, &regs->Mb0Lo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) tig_ver = readl(&regs->HostCtrl) >> 28;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911)
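^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) * The chip revision lives in the top nibble of HostCtrl:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) * 4 and 5 are Tigon I silicon, 6 is the Tigon II.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) */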
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) switch (tig_ver) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) #ifndef CONFIG_ACENIC_OMIT_TIGON_I
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) case 4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) case 5:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) printk(KERN_INFO " Tigon I (Rev. %i), Firmware: %i.%i.%i, ",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) tig_ver, ap->firmware_major, ap->firmware_minor,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) ap->firmware_fix);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) writel(0, &regs->LocalCtrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) ap->version = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) ap->tx_ring_entries = TIGON_I_TX_RING_ENTRIES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) case 6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) printk(KERN_INFO " Tigon II (Rev. %i), Firmware: %i.%i.%i, ",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) tig_ver, ap->firmware_major, ap->firmware_minor,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) ap->firmware_fix);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) writel(readl(&regs->CpuBCtrl) | CPU_HALT, &regs->CpuBCtrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) readl(&regs->CpuBCtrl); /* PCI write posting */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) * The SRAM bank size does _not_ indicate the amount
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) * of memory on the card, it controls the _bank_ size!
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) * I.e. a 1MB AceNIC will have two banks of 512KB.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) writel(SRAM_BANK_512K, &regs->LocalCtrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) writel(SYNC_SRAM_TIMING, &regs->MiscCfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) ap->version = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) ap->tx_ring_entries = MAX_TX_RING_ENTRIES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) printk(KERN_WARNING " Unsupported Tigon version detected "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) "(%i)\n", tig_ver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) ecode = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) goto init_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) * ModeStat _must_ be set after the SRAM settings as this change
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) * seems to corrupt the ModeStat and possibly other registers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) * The SRAM settings survive resets and setting it to the same
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) * value a second time works as well. This is what caused the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) * `Firmware not running' problem on the Tigon II.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) #ifdef __BIG_ENDIAN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) writel(ACE_BYTE_SWAP_DMA | ACE_WARN | ACE_FATAL | ACE_BYTE_SWAP_BD |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) ACE_WORD_SWAP_BD | ACE_NO_JUMBO_FRAG, &regs->ModeStat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) writel(ACE_BYTE_SWAP_DMA | ACE_WARN | ACE_FATAL |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) ACE_WORD_SWAP_BD | ACE_NO_JUMBO_FRAG, &regs->ModeStat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) readl(&regs->ModeStat); /* PCI write posting */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962)
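^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) * The MAC address is read byte by byte from EEPROM offset 0x8c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) * onwards into two 32-bit accumulators; mac1 supplies the first
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) * two bytes of the station address and mac2 the remaining four.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) */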
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) mac1 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) for (i = 0; i < 4; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) int t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) mac1 = mac1 << 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) t = read_eeprom_byte(dev, 0x8c+i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) if (t < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) ecode = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) goto init_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) mac1 |= (t & 0xff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) mac2 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) for (i = 4; i < 8; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) int t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) mac2 = mac2 << 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) t = read_eeprom_byte(dev, 0x8c+i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) if (t < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) ecode = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) goto init_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) mac2 |= (t & 0xff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) writel(mac1, &regs->MacAddrHi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) writel(mac2, &regs->MacAddrLo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) dev->dev_addr[0] = (mac1 >> 8) & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) dev->dev_addr[1] = mac1 & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) dev->dev_addr[2] = (mac2 >> 24) & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) dev->dev_addr[3] = (mac2 >> 16) & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) dev->dev_addr[4] = (mac2 >> 8) & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) dev->dev_addr[5] = mac2 & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) printk("MAC: %pM\n", dev->dev_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) * Looks like this needs to be dealt with on all architectures;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) * even this %$#%$# N440BX Intel based thing doesn't get it right.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) * I.e. with two NICs in the machine, one will have the cache
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) * line size set at boot time, the other will not.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) */
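^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) * PCI_CACHE_LINE_SIZE is expressed in 32-bit words, hence the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) * shift by two to get bytes here and the shift back when the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) * corrected value is written below.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) */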
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) pdev = ap->pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) cache_size <<= 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) if (cache_size != SMP_CACHE_BYTES) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) printk(KERN_INFO " PCI cache line size set incorrectly "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) "(%i bytes) by BIOS/FW, ", cache_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) if (cache_size > SMP_CACHE_BYTES)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) printk("expecting %i\n", SMP_CACHE_BYTES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) printk("correcting to %i\n", SMP_CACHE_BYTES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) SMP_CACHE_BYTES >> 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) pci_state = readl(&regs->PciState);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) printk(KERN_INFO " PCI bus width: %i bits, speed: %iMHz, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) "latency: %i clks\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) (pci_state & PCI_32BIT) ? 32 : 64,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) (pci_state & PCI_66MHZ) ? 66 : 33,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) ap->pci_latency);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) * Set the max DMA transfer size. Seems that for most systems
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) * the performance is better when no MAX parameter is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) * set. However, for systems enabling PCI write and invalidate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) * DMA writes must be set to the L1 cache line size to get
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) * optimal performance.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) * The default is now to turn the PCI write and invalidate off
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) * - that is what Alteon does for NT.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) tmp = READ_CMD_MEM | WRITE_CMD_MEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) if (ap->version >= 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) tmp |= (MEM_READ_MULTIPLE | (pci_state & PCI_66MHZ));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) * Tuning parameters are only supported for up to 8 cards (ACE_MAX_MOD_PARMS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) if (board_idx == BOARD_IDX_OVERFLOW ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) dis_pci_mem_inval[board_idx]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) if (ap->pci_command & PCI_COMMAND_INVALIDATE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) ap->pci_command &= ~PCI_COMMAND_INVALIDATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) pci_write_config_word(pdev, PCI_COMMAND,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) ap->pci_command);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) printk(KERN_INFO " Disabling PCI memory "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) "write and invalidate\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) } else if (ap->pci_command & PCI_COMMAND_INVALIDATE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) printk(KERN_INFO " PCI memory write & invalidate "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) "enabled by BIOS, enabling counter measures\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) switch (SMP_CACHE_BYTES) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) case 16:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) tmp |= DMA_WRITE_MAX_16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) case 32:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) tmp |= DMA_WRITE_MAX_32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) case 64:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) tmp |= DMA_WRITE_MAX_64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) case 128:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) tmp |= DMA_WRITE_MAX_128;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) printk(KERN_INFO " Cache line size %i not "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) "supported, PCI write and invalidate "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) "disabled\n", SMP_CACHE_BYTES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) ap->pci_command &= ~PCI_COMMAND_INVALIDATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) pci_write_config_word(pdev, PCI_COMMAND,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) ap->pci_command);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) #ifdef __sparc__
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) * On this platform, we know what the best dma settings
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) * are. We use 64-byte maximum bursts, because if we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) * burst larger than the cache line size (or even cross
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) * a 64byte boundary in a single burst) the UltraSparc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) * PCI controller will disconnect at 64-byte multiples.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) * Read-multiple will be properly enabled above, and when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) * set will give the PCI controller proper hints about
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) * prefetching.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) tmp &= ~DMA_READ_WRITE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) tmp |= DMA_READ_MAX_64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) tmp |= DMA_WRITE_MAX_64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) #ifdef __alpha__
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) tmp &= ~DMA_READ_WRITE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) tmp |= DMA_READ_MAX_128;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) * All the docs say MUST NOT. Well, I did.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) * Nothing terrible happens if we load the wrong size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) * Bit w&i still works better!
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) tmp |= DMA_WRITE_MAX_128;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) writel(tmp, &regs->PciState);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) #if 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) * The Host PCI bus controller driver has to set FBB.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) * If all devices on that PCI bus support FBB, then the controller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) * can enable FBB support in the Host PCI Bus controller (or on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) * the PCI-PCI bridge if that applies).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) * -ggg
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) * I have received reports from people having problems when this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) * bit is enabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) if (!(ap->pci_command & PCI_COMMAND_FAST_BACK)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) printk(KERN_INFO " Enabling PCI Fast Back to Back\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) ap->pci_command |= PCI_COMMAND_FAST_BACK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) pci_write_config_word(pdev, PCI_COMMAND, ap->pci_command);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) * Configure DMA attributes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) ap->pci_using_dac = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) } else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) ap->pci_using_dac = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) ecode = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) goto init_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) }
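^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) * Prefer the 64-bit DMA mask and fall back to 32-bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) * ap->pci_using_dac records whether the 64-bit mask was
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) * accepted.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) */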
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) * Initialize the generic info block and the command+event rings
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) * and the control blocks for the transmit and receive rings
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) * as they need to be setup once and for all.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) if (!(info = dma_alloc_coherent(&ap->pdev->dev, sizeof(struct ace_info),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) &ap->info_dma, GFP_KERNEL))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) ecode = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) goto init_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) ap->info = info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) * Get the memory for the skb rings.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) if (!(ap->skb = kzalloc(sizeof(struct ace_skb), GFP_KERNEL))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) ecode = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) goto init_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) ecode = request_irq(pdev->irq, ace_interrupt, IRQF_SHARED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) DRV_NAME, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) if (ecode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) printk(KERN_WARNING "%s: Requested IRQ %d is busy\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) DRV_NAME, pdev->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) goto init_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) dev->irq = pdev->irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) #ifdef INDEX_DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) spin_lock_init(&ap->debug_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) ap->last_tx = ACE_TX_RING_ENTRIES(ap) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) ap->last_std_rx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) ap->last_mini_rx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) ecode = ace_load_firmware(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) if (ecode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) goto init_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) ap->fw_running = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) tmp_ptr = ap->info_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) writel(tmp_ptr >> 32, &regs->InfoPtrHi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) writel(tmp_ptr & 0xffffffff, &regs->InfoPtrLo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) memset(ap->evt_ring, 0, EVT_RING_ENTRIES * sizeof(struct event));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) set_aceaddr(&info->evt_ctrl.rngptr, ap->evt_ring_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) info->evt_ctrl.flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) *(ap->evt_prd) = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) set_aceaddr(&info->evt_prd_ptr, ap->evt_prd_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) writel(0, &regs->EvtCsm);
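^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) * The NIC updates its event producer index in *ap->evt_prd (the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) * address was handed over via evt_prd_ptr above), while the host
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) * apparently acknowledges consumed events through EvtCsm.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) */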
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) set_aceaddr(&info->cmd_ctrl.rngptr, 0x100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) info->cmd_ctrl.flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) info->cmd_ctrl.max_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) for (i = 0; i < CMD_RING_ENTRIES; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) writel(0, &regs->CmdRng[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) writel(0, &regs->CmdPrd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) writel(0, &regs->CmdCsm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) tmp_ptr = ap->info_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) tmp_ptr += (unsigned long) &(((struct ace_info *)0)->s.stats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) set_aceaddr(&info->stats2_ptr, (dma_addr_t) tmp_ptr);
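^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) * The cast-through-zero above is a hand-rolled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) * offsetof(struct ace_info, s.stats), giving the NIC the bus
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) * address of the stats block embedded in the info block.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) */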
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) set_aceaddr(&info->rx_std_ctrl.rngptr, ap->rx_ring_base_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) info->rx_std_ctrl.max_len = ACE_STD_BUFSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) info->rx_std_ctrl.flags =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) RCB_FLG_TCP_UDP_SUM | RCB_FLG_NO_PSEUDO_HDR | RCB_FLG_VLAN_ASSIST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) memset(ap->rx_std_ring, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) RX_STD_RING_ENTRIES * sizeof(struct rx_desc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) for (i = 0; i < RX_STD_RING_ENTRIES; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) ap->rx_std_ring[i].flags = BD_FLG_TCP_UDP_SUM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) ap->rx_std_skbprd = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) atomic_set(&ap->cur_rx_bufs, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) set_aceaddr(&info->rx_jumbo_ctrl.rngptr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) (ap->rx_ring_base_dma +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) (sizeof(struct rx_desc) * RX_STD_RING_ENTRIES)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) info->rx_jumbo_ctrl.max_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) info->rx_jumbo_ctrl.flags =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) RCB_FLG_TCP_UDP_SUM | RCB_FLG_NO_PSEUDO_HDR | RCB_FLG_VLAN_ASSIST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) memset(ap->rx_jumbo_ring, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) RX_JUMBO_RING_ENTRIES * sizeof(struct rx_desc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) for (i = 0; i < RX_JUMBO_RING_ENTRIES; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) ap->rx_jumbo_ring[i].flags = BD_FLG_TCP_UDP_SUM | BD_FLG_JUMBO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) ap->rx_jumbo_skbprd = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) atomic_set(&ap->cur_jumbo_bufs, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) memset(ap->rx_mini_ring, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) RX_MINI_RING_ENTRIES * sizeof(struct rx_desc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) if (ap->version >= 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) set_aceaddr(&info->rx_mini_ctrl.rngptr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) (ap->rx_ring_base_dma +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) (sizeof(struct rx_desc) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) (RX_STD_RING_ENTRIES +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) RX_JUMBO_RING_ENTRIES))));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) info->rx_mini_ctrl.max_len = ACE_MINI_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) info->rx_mini_ctrl.flags =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) RCB_FLG_TCP_UDP_SUM|RCB_FLG_NO_PSEUDO_HDR|RCB_FLG_VLAN_ASSIST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) for (i = 0; i < RX_MINI_RING_ENTRIES; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) ap->rx_mini_ring[i].flags =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) BD_FLG_TCP_UDP_SUM | BD_FLG_MINI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) set_aceaddr(&info->rx_mini_ctrl.rngptr, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) info->rx_mini_ctrl.flags = RCB_FLG_RNG_DISABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) info->rx_mini_ctrl.max_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) ap->rx_mini_skbprd = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) atomic_set(&ap->cur_mini_bufs, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) set_aceaddr(&info->rx_return_ctrl.rngptr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) (ap->rx_ring_base_dma +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) (sizeof(struct rx_desc) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) (RX_STD_RING_ENTRIES +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) RX_JUMBO_RING_ENTRIES +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) RX_MINI_RING_ENTRIES))));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) info->rx_return_ctrl.flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) info->rx_return_ctrl.max_len = RX_RETURN_RING_ENTRIES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) memset(ap->rx_return_ring, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) RX_RETURN_RING_ENTRIES * sizeof(struct rx_desc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) set_aceaddr(&info->rx_ret_prd_ptr, ap->rx_ret_prd_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) *(ap->rx_ret_prd) = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) writel(TX_RING_BASE, &regs->WinBase);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281)
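^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) * On the Tigon I the TX ring sits in NIC memory behind the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) * register window selected above, so it has to be cleared with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) * writel() word by word; the Tigon II ring is plain host memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) * and can simply be memset().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) */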
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) if (ACE_IS_TIGON_I(ap)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) ap->tx_ring = (__force struct tx_desc *) regs->Window;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) for (i = 0; i < (TIGON_I_TX_RING_ENTRIES
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) * sizeof(struct tx_desc)) / sizeof(u32); i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) writel(0, (__force void __iomem *)ap->tx_ring + i * 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) set_aceaddr(&info->tx_ctrl.rngptr, TX_RING_BASE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) memset(ap->tx_ring, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) MAX_TX_RING_ENTRIES * sizeof(struct tx_desc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) set_aceaddr(&info->tx_ctrl.rngptr, ap->tx_ring_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) info->tx_ctrl.max_len = ACE_TX_RING_ENTRIES(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) tmp = RCB_FLG_TCP_UDP_SUM | RCB_FLG_NO_PSEUDO_HDR | RCB_FLG_VLAN_ASSIST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) * The Tigon I does not like having the TX ring in host memory ;-(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) if (!ACE_IS_TIGON_I(ap))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) tmp |= RCB_FLG_TX_HOST_RING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) #if TX_COAL_INTS_ONLY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) tmp |= RCB_FLG_COAL_INT_ONLY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) info->tx_ctrl.flags = tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) set_aceaddr(&info->tx_csm_ptr, ap->tx_csm_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) * Potential item for tuning parameter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) #if 0 /* NO */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) writel(DMA_THRESH_16W, &regs->DmaReadCfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) writel(DMA_THRESH_16W, &regs->DmaWriteCfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) writel(DMA_THRESH_8W, &regs->DmaReadCfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) writel(DMA_THRESH_8W, &regs->DmaWriteCfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) writel(0, &regs->MaskInt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) writel(1, &regs->IfIdx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) #if 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) * McKinley boxes do not like us fiddling with AssistState
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) * this early
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) writel(1, &regs->AssistState);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) writel(DEF_STAT, &regs->TuneStatTicks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) writel(DEF_TRACE, &regs->TuneTrace);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) ace_set_rxtx_parms(dev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) if (board_idx == BOARD_IDX_OVERFLOW) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) printk(KERN_WARNING "%s: more than %i NICs detected, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) "ignoring module parameters!\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) ap->name, ACE_MAX_MOD_PARMS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) } else if (board_idx >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) if (tx_coal_tick[board_idx])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) writel(tx_coal_tick[board_idx],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) &regs->TuneTxCoalTicks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) if (max_tx_desc[board_idx])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) writel(max_tx_desc[board_idx], &regs->TuneMaxTxDesc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) if (rx_coal_tick[board_idx])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) writel(rx_coal_tick[board_idx],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) &regs->TuneRxCoalTicks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) if (max_rx_desc[board_idx])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) writel(max_rx_desc[board_idx], &regs->TuneMaxRxDesc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) if (trace[board_idx])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) writel(trace[board_idx], &regs->TuneTrace);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) if ((tx_ratio[board_idx] > 0) && (tx_ratio[board_idx] < 64))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) writel(tx_ratio[board_idx], &regs->TxBufRat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) * Default link parameters
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) tmp = LNK_ENABLE | LNK_FULL_DUPLEX | LNK_1000MB | LNK_100MB |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) LNK_10MB | LNK_RX_FLOW_CTL_Y | LNK_NEG_FCTL | LNK_NEGOTIATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) if (ap->version >= 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) tmp |= LNK_TX_FLOW_CTL_Y;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) * Override link default parameters
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) if ((board_idx >= 0) && link_state[board_idx]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) int option = link_state[board_idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374)
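^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) * link_state is a per-board bitmask, decoded below: bit 0 half
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) * duplex, bit 1 disable autonegotiation, bits 4-6 advertise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) * 10/100/1000 Mbit, bit 8 disable flow control negotiation,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) * bit 9 RX flow control, bit 10 TX flow control (Tigon II only).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) */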
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) tmp = LNK_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) if (option & 0x01) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) printk(KERN_INFO "%s: Setting half duplex link\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) ap->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) tmp &= ~LNK_FULL_DUPLEX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) if (option & 0x02)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) tmp &= ~LNK_NEGOTIATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) if (option & 0x10)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) tmp |= LNK_10MB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) if (option & 0x20)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) tmp |= LNK_100MB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) if (option & 0x40)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) tmp |= LNK_1000MB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) if ((option & 0x70) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) printk(KERN_WARNING "%s: No media speed specified, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) "forcing auto negotiation\n", ap->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) tmp |= LNK_NEGOTIATE | LNK_1000MB |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) LNK_100MB | LNK_10MB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) if ((option & 0x100) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) tmp |= LNK_NEG_FCTL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) printk(KERN_INFO "%s: Disabling flow control "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) "negotiation\n", ap->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) if (option & 0x200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) tmp |= LNK_RX_FLOW_CTL_Y;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) if ((option & 0x400) && (ap->version >= 2)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) printk(KERN_INFO "%s: Enabling TX flow control\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) ap->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) tmp |= LNK_TX_FLOW_CTL_Y;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) ap->link = tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) writel(tmp, &regs->TuneLink);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) if (ap->version >= 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) writel(tmp, &regs->TuneFastLink);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) writel(ap->firmware_start, &regs->Pc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) writel(0, &regs->Mb0Lo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) * Set tx_csm before we start receiving interrupts, otherwise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) * the interrupt handler might think it is supposed to process
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) * tx ints before we are up and running, which may cause a null
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) * pointer access in the int handler.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) ap->cur_rx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) ap->tx_prd = *(ap->tx_csm) = ap->tx_ret_csm = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) ace_set_txprd(regs, ap, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) writel(0, &regs->RxRetCsm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) * Enable DMA engine now.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) * If we do this sooner, the McKinley box pukes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) * I assume it's because Tigon II DMA engine wants to check
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) * *something* even before the CPU is started.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) writel(1, &regs->AssistState); /* enable DMA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) * Start the NIC CPU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) writel(readl(&regs->CpuCtrl) & ~(CPU_HALT|CPU_TRACE), &regs->CpuCtrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) readl(&regs->CpuCtrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) * Wait for the firmware to spin up - max 3 seconds.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) myjif = jiffies + 3 * HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) while (time_before(jiffies, myjif) && !ap->fw_running)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) cpu_relax();
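^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) * ap->fw_running is set asynchronously once the firmware
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) * signals that it is up; if the poll above timed out, the NIC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) * is considered wedged and is shut back down below.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) */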
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) if (!ap->fw_running) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) printk(KERN_ERR "%s: Firmware NOT running!\n", ap->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) ace_dump_trace(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) writel(readl(&regs->CpuCtrl) | CPU_HALT, &regs->CpuCtrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) readl(&regs->CpuCtrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) /* aman@sgi.com - account for badly behaving firmware/NIC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) * - have observed that the NIC may continue to generate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) * interrupts for some reason; attempt to stop it - halt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) * second CPU for Tigon II cards, and also clear Mb0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) * - if we're a module, we'll fail to load if this was
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) * the only GbE card in the system => if the kernel does
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) * see an interrupt from the NIC, code to handle it is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) * gone and OOps! - so free_irq also
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) if (ap->version >= 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) writel(readl(&regs->CpuBCtrl) | CPU_HALT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) &regs->CpuBCtrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) writel(0, &regs->Mb0Lo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) readl(&regs->Mb0Lo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) ecode = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) goto init_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) * We load the ring here as there seems to be no way to tell the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) * firmware to wipe the ring without re-initializing it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) if (!test_and_set_bit(0, &ap->std_refill_busy))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) ace_load_std_rx_ring(dev, RX_RING_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) printk(KERN_ERR "%s: Someone is busy refilling the RX ring\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) ap->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) if (ap->version >= 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) if (!test_and_set_bit(0, &ap->mini_refill_busy))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) ace_load_mini_rx_ring(dev, RX_MINI_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) printk(KERN_ERR "%s: Someone is busy refilling "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) "the RX mini ring\n", ap->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) init_error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) ace_init_cleanup(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) return ecode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) static void ace_set_rxtx_parms(struct net_device *dev, int jumbo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) struct ace_private *ap = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) struct ace_regs __iomem *regs = ap->regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) int board_idx = ap->board_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508)
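^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) * Module parameters take precedence: a tuning register is only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) * rewritten with the standard or jumbo default if the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) * corresponding per-board parameter was left unset.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) */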
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) if (board_idx >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) if (!jumbo) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) if (!tx_coal_tick[board_idx])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) writel(DEF_TX_COAL, &regs->TuneTxCoalTicks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) if (!max_tx_desc[board_idx])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) writel(DEF_TX_MAX_DESC, &regs->TuneMaxTxDesc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) if (!rx_coal_tick[board_idx])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) writel(DEF_RX_COAL, &regs->TuneRxCoalTicks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) if (!max_rx_desc[board_idx])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) writel(DEF_RX_MAX_DESC, &regs->TuneMaxRxDesc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) if (!tx_ratio[board_idx])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) writel(DEF_TX_RATIO, &regs->TxBufRat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) if (!tx_coal_tick[board_idx])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) writel(DEF_JUMBO_TX_COAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) &regs->TuneTxCoalTicks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) if (!max_tx_desc[board_idx])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) writel(DEF_JUMBO_TX_MAX_DESC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) &regs->TuneMaxTxDesc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) if (!rx_coal_tick[board_idx])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) writel(DEF_JUMBO_RX_COAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) &regs->TuneRxCoalTicks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) if (!max_rx_desc[board_idx])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) writel(DEF_JUMBO_RX_MAX_DESC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) &regs->TuneMaxRxDesc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) if (!tx_ratio[board_idx])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) writel(DEF_JUMBO_TX_RATIO, &regs->TxBufRat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540)
static void ace_watchdog(struct net_device *dev, unsigned int txqueue)
{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) struct ace_private *ap = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) struct ace_regs __iomem *regs = ap->regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) * We haven't received a stats update event for more than 2.5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) * seconds and there is data in the transmit queue, thus we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) * assume the card is stuck.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) */
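	/*
	 * *ap->tx_csm is the tx consumer index the NIC writes back to
	 * host memory; ap->tx_ret_csm is the last value the host has
	 * processed. A mismatch means completions are still outstanding
	 * even though the queue timed out.
	 */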
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) if (*ap->tx_csm != ap->tx_ret_csm) {
		printk(KERN_WARNING "%s: Transmitter is stuck, %08x\n",
		       dev->name, (unsigned int)readl(&regs->HostCtrl));
		/* This can happen due to IEEE flow control. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) printk(KERN_DEBUG "%s: BUG... transmitter died. Kicking it.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) #if 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) netif_wake_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) static void ace_tasklet(struct tasklet_struct *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) struct ace_private *ap = from_tasklet(ap, t, ace_tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) struct net_device *dev = ap->ndev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) int cur_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) cur_size = atomic_read(&ap->cur_rx_bufs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) if ((cur_size < RX_LOW_STD_THRES) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) !test_and_set_bit(0, &ap->std_refill_busy)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) #ifdef DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) printk("refilling buffers (current %i)\n", cur_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) ace_load_std_rx_ring(dev, RX_RING_SIZE - cur_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) if (ap->version >= 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) cur_size = atomic_read(&ap->cur_mini_bufs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) if ((cur_size < RX_LOW_MINI_THRES) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) !test_and_set_bit(0, &ap->mini_refill_busy)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) #ifdef DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) printk("refilling mini buffers (current %i)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) cur_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) ace_load_mini_rx_ring(dev, RX_MINI_SIZE - cur_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) cur_size = atomic_read(&ap->cur_jumbo_bufs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) if (ap->jumbo && (cur_size < RX_LOW_JUMBO_THRES) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) !test_and_set_bit(0, &ap->jumbo_refill_busy)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) #ifdef DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) printk("refilling jumbo buffers (current %i)\n", cur_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) ace_load_jumbo_rx_ring(dev, RX_JUMBO_SIZE - cur_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) ap->tasklet_pending = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604)
/*
 * Copy the contents of the NIC's trace buffer to kernel memory.
 * (Currently a stub; the body below has never been enabled.)
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) static void ace_dump_trace(struct ace_private *ap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) #if 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) if (!ap->trace_buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) if (!(ap->trace_buf = kmalloc(ACE_TRACE_SIZE, GFP_KERNEL)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) * Load the standard rx ring.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) *
 * Loading the rings is safe without holding the spin lock, since it is
 * done only either before the device is enabled (thus no interrupts
 * are generated) or from the interrupt handler/tasklet itself, which
 * serialize against each other via the *_refill_busy bits.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) static void ace_load_std_rx_ring(struct net_device *dev, int nr_bufs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) struct ace_private *ap = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) struct ace_regs __iomem *regs = ap->regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) short i, idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) prefetchw(&ap->cur_rx_bufs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) idx = ap->rx_std_skbprd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) for (i = 0; i < nr_bufs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) struct rx_desc *rd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) dma_addr_t mapping;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) skb = netdev_alloc_skb_ip_align(dev, ACE_STD_BUFSIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) if (!skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644)
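		/*
		 * Map the fresh skb data for DMA; passing page + offset
		 * lets dma_map_page() cope with buffers that do not
		 * start on a page boundary.
		 */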
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) mapping = dma_map_page(&ap->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) virt_to_page(skb->data),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) offset_in_page(skb->data),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) ACE_STD_BUFSIZE, DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) ap->skb->rx_std_skbuff[idx].skb = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) dma_unmap_addr_set(&ap->skb->rx_std_skbuff[idx],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) mapping, mapping);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) rd = &ap->rx_std_ring[idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) set_aceaddr(&rd->addr, mapping);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) rd->size = ACE_STD_BUFSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) rd->idx = idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) idx = (idx + 1) % RX_STD_RING_ENTRIES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) if (!i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) goto error_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) atomic_add(i, &ap->cur_rx_bufs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) ap->rx_std_skbprd = idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) if (ACE_IS_TIGON_I(ap)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) struct cmd cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) cmd.evt = C_SET_RX_PRD_IDX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) cmd.code = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) cmd.idx = ap->rx_std_skbprd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) ace_issue_cmd(regs, &cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) } else {
		writel(idx, &regs->RxStdPrd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) clear_bit(0, &ap->std_refill_busy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) error_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) printk(KERN_INFO "Out of memory when allocating "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) "standard receive buffers\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) }
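
/*
 * A minimal sketch (illustrative only, not driver code) of the refill
 * protocol shared by the three ring loaders: a caller claims a ring
 * with test_and_set_bit() and the loader releases it with clear_bit()
 * on its way out, so at most one context refills a given ring at a
 * time.
 */
#if 0
	if (!test_and_set_bit(0, &ap->std_refill_busy))	/* claim the ring */
		ace_load_std_rx_ring(dev, nr_bufs);	/* clear_bit() on exit */
#endif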
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) static void ace_load_mini_rx_ring(struct net_device *dev, int nr_bufs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) struct ace_private *ap = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) struct ace_regs __iomem *regs = ap->regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) short i, idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) prefetchw(&ap->cur_mini_bufs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) idx = ap->rx_mini_skbprd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) for (i = 0; i < nr_bufs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) struct rx_desc *rd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) dma_addr_t mapping;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) skb = netdev_alloc_skb_ip_align(dev, ACE_MINI_BUFSIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) if (!skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) mapping = dma_map_page(&ap->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) virt_to_page(skb->data),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) offset_in_page(skb->data),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) ACE_MINI_BUFSIZE, DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) ap->skb->rx_mini_skbuff[idx].skb = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) dma_unmap_addr_set(&ap->skb->rx_mini_skbuff[idx],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) mapping, mapping);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) rd = &ap->rx_mini_ring[idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) set_aceaddr(&rd->addr, mapping);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) rd->size = ACE_MINI_BUFSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) rd->idx = idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) idx = (idx + 1) % RX_MINI_RING_ENTRIES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) if (!i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) goto error_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) atomic_add(i, &ap->cur_mini_bufs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) ap->rx_mini_skbprd = idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727)
	writel(idx, &regs->RxMiniPrd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) clear_bit(0, &ap->mini_refill_busy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) error_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) printk(KERN_INFO "Out of memory when allocating "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) "mini receive buffers\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) /*
 * Load the jumbo rx ring; this may happen at any time once the MTU
 * has been changed to a value > 1500.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) static void ace_load_jumbo_rx_ring(struct net_device *dev, int nr_bufs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) struct ace_private *ap = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) struct ace_regs __iomem *regs = ap->regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) short i, idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) idx = ap->rx_jumbo_skbprd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) for (i = 0; i < nr_bufs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) struct rx_desc *rd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) dma_addr_t mapping;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) skb = netdev_alloc_skb_ip_align(dev, ACE_JUMBO_BUFSIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) if (!skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) mapping = dma_map_page(&ap->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) virt_to_page(skb->data),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) offset_in_page(skb->data),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) ACE_JUMBO_BUFSIZE, DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) ap->skb->rx_jumbo_skbuff[idx].skb = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) dma_unmap_addr_set(&ap->skb->rx_jumbo_skbuff[idx],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) mapping, mapping);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) rd = &ap->rx_jumbo_ring[idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) set_aceaddr(&rd->addr, mapping);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) rd->size = ACE_JUMBO_BUFSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) rd->idx = idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) idx = (idx + 1) % RX_JUMBO_RING_ENTRIES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) if (!i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) goto error_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) atomic_add(i, &ap->cur_jumbo_bufs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) ap->rx_jumbo_skbprd = idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) if (ACE_IS_TIGON_I(ap)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) struct cmd cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) cmd.evt = C_SET_RX_JUMBO_PRD_IDX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) cmd.code = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) cmd.idx = ap->rx_jumbo_skbprd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) ace_issue_cmd(regs, &cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) } else {
		writel(idx, &regs->RxJumboPrd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) clear_bit(0, &ap->jumbo_refill_busy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) error_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) if (net_ratelimit())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) printk(KERN_INFO "Out of memory when allocating "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) "jumbo receive buffers\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) * All events are considered to be slow (RX/TX ints do not generate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) * events) and are handled here, outside the main interrupt handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) * to reduce the size of the handler.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) static u32 ace_handle_event(struct net_device *dev, u32 evtcsm, u32 evtprd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) struct ace_private *ap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) ap = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) while (evtcsm != evtprd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) switch (ap->evt_ring[evtcsm].evt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) case E_FW_RUNNING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) printk(KERN_INFO "%s: Firmware up and running\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) ap->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) ap->fw_running = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) case E_STATS_UPDATED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) case E_LNK_STATE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) u16 code = ap->evt_ring[evtcsm].code;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) switch (code) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) case E_C_LINK_UP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) u32 state = readl(&ap->regs->GigLnkState);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) printk(KERN_WARNING "%s: Optical link UP "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) "(%s Duplex, Flow Control: %s%s)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) ap->name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) state & LNK_FULL_DUPLEX ? "Full":"Half",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) state & LNK_TX_FLOW_CTL_Y ? "TX " : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) state & LNK_RX_FLOW_CTL_Y ? "RX" : "");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) case E_C_LINK_DOWN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) printk(KERN_WARNING "%s: Optical link DOWN\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) ap->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) case E_C_LINK_10_100:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) printk(KERN_WARNING "%s: 10/100BaseT link "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) "UP\n", ap->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) printk(KERN_ERR "%s: Unknown optical link "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) "state %02x\n", ap->name, code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) case E_ERROR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) switch(ap->evt_ring[evtcsm].code) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) case E_C_ERR_INVAL_CMD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) printk(KERN_ERR "%s: invalid command error\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) ap->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) case E_C_ERR_UNIMP_CMD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) printk(KERN_ERR "%s: unimplemented command "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) "error\n", ap->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) case E_C_ERR_BAD_CFG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) printk(KERN_ERR "%s: bad config error\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) ap->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) printk(KERN_ERR "%s: unknown error %02x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) ap->name, ap->evt_ring[evtcsm].code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) case E_RESET_JUMBO_RNG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) for (i = 0; i < RX_JUMBO_RING_ENTRIES; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) if (ap->skb->rx_jumbo_skbuff[i].skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) ap->rx_jumbo_ring[i].size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) set_aceaddr(&ap->rx_jumbo_ring[i].addr, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) dev_kfree_skb(ap->skb->rx_jumbo_skbuff[i].skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) ap->skb->rx_jumbo_skbuff[i].skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) if (ACE_IS_TIGON_I(ap)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) struct cmd cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) cmd.evt = C_SET_RX_JUMBO_PRD_IDX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) cmd.code = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) cmd.idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) ace_issue_cmd(ap->regs, &cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) } else {
				writel(0, &ap->regs->RxJumboPrd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) ap->jumbo = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) ap->rx_jumbo_skbprd = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) printk(KERN_INFO "%s: Jumbo ring flushed\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) ap->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) clear_bit(0, &ap->jumbo_refill_busy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) printk(KERN_ERR "%s: Unhandled event 0x%02x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) ap->name, ap->evt_ring[evtcsm].evt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) evtcsm = (evtcsm + 1) % EVT_RING_ENTRIES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) return evtcsm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) static void ace_rx_int(struct net_device *dev, u32 rxretprd, u32 rxretcsm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) struct ace_private *ap = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) u32 idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) int mini_count = 0, std_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) idx = rxretcsm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) prefetchw(&ap->cur_rx_bufs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) prefetchw(&ap->cur_mini_bufs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) while (idx != rxretprd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) struct ring_info *rip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) struct rx_desc *retdesc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) u32 skbidx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) int bd_flags, desc_type, mapsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) u16 csum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) /* make sure the rx descriptor isn't read before rxretprd */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) if (idx == rxretcsm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) rmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) retdesc = &ap->rx_return_ring[idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) skbidx = retdesc->idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) bd_flags = retdesc->flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) desc_type = bd_flags & (BD_FLG_JUMBO | BD_FLG_MINI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) switch(desc_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) * Normal frames do not have any flags set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) * Mini and normal frames arrive frequently,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) * so use a local counter to avoid doing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) * atomic operations for each packet arriving.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) rip = &ap->skb->rx_std_skbuff[skbidx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) mapsize = ACE_STD_BUFSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) std_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) case BD_FLG_JUMBO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) rip = &ap->skb->rx_jumbo_skbuff[skbidx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) mapsize = ACE_JUMBO_BUFSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) atomic_dec(&ap->cur_jumbo_bufs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) case BD_FLG_MINI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) rip = &ap->skb->rx_mini_skbuff[skbidx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) mapsize = ACE_MINI_BUFSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) mini_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) printk(KERN_INFO "%s: unknown frame type (0x%02x) "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) "returned by NIC\n", dev->name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) retdesc->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) skb = rip->skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) rip->skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) dma_unmap_page(&ap->pdev->dev, dma_unmap_addr(rip, mapping),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) mapsize, DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) skb_put(skb, retdesc->size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) * Fly baby, fly!
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) csum = retdesc->tcp_udp_csum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) skb->protocol = eth_type_trans(skb, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986)
		/*
		 * Instead of forcing the poor Tigon MIPS cpu to
		 * calculate the pseudo hdr checksum, we do this
		 * ourselves.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) if (bd_flags & BD_FLG_TCP_UDP_SUM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) skb->csum = htons(csum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) skb->ip_summed = CHECKSUM_COMPLETE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) skb_checksum_none_assert(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) /* send it up */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) if ((bd_flags & BD_FLG_VLAN_TAG))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), retdesc->vlan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) netif_rx(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) dev->stats.rx_packets++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) dev->stats.rx_bytes += retdesc->size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) idx = (idx + 1) % RX_RETURN_RING_ENTRIES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) atomic_sub(std_count, &ap->cur_rx_bufs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) if (!ACE_IS_TIGON_I(ap))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) atomic_sub(mini_count, &ap->cur_mini_bufs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) * According to the documentation RxRetCsm is obsolete with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) * the 12.3.x Firmware - my Tigon I NICs seem to disagree!
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) if (ACE_IS_TIGON_I(ap)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) writel(idx, &ap->regs->RxRetCsm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) ap->cur_rx = idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) idx = rxretprd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) static inline void ace_tx_int(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) u32 txcsm, u32 idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) struct ace_private *ap = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) struct tx_ring_info *info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) info = ap->skb->tx_skbuff + idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) skb = info->skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) if (dma_unmap_len(info, maplen)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) dma_unmap_page(&ap->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) dma_unmap_addr(info, mapping),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) dma_unmap_len(info, maplen),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) dma_unmap_len_set(info, maplen, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) if (skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) dev->stats.tx_packets++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) dev->stats.tx_bytes += skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) dev_consume_skb_irq(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) info->skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) idx = (idx + 1) % ACE_TX_RING_ENTRIES(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) } while (idx != txcsm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) if (netif_queue_stopped(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) netif_wake_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) ap->tx_ret_csm = txcsm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065)
	/* So... tx_ret_csm is advanced _after_ the check for device
	 * wakeup.
	 *
	 * We could try to advance it before. In that case we would get
	 * the following race condition: hard_start_xmit on another cpu
	 * enters after we advanced tx_ret_csm and fills the space we
	 * have just freed, so that we wake the device up illegally.
	 * There is no good way to work around this (the entry to
	 * ace_start_xmit detects this condition and prevents ring
	 * corruption, but it is not a good workaround.)
	 *
	 * When tx_ret_csm is advanced after, we wake up the device
	 * _only_ if we really have some space in the ring (though the
	 * core doing hard_start_xmit can see a full ring for some
	 * period and has to synchronize.) Superb.
	 * BUT! We get another subtle race condition. hard_start_xmit
	 * may think the ring is full between the wakeup and the
	 * advancing of tx_ret_csm and stop the queue instantly! That
	 * is not so bad: we are guaranteed that there is something in
	 * the ring, so the next irq will resume transmission. To speed
	 * this up we could mark the descriptor which closes the ring
	 * with BD_FLG_COAL_NOW (see ace_start_xmit).
	 *
	 * This dilemma exists in all lock-free devices; following the
	 * scheme used in drivers by Donald Becker, we select the least
	 * dangerous one.
	 * --ANK
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) }
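
/*
 * Illustrative sketch, not the driver's real tx_ring_full(): one
 * common way to phrase "ring full" for a ring of 'entries' slots with
 * indices kept modulo 'entries'. The producer must never be allowed
 * to catch up with the consumer from behind.
 */
#if 0
static inline int example_ring_full(u32 csm, u32 prd, u32 entries)
{
	/* full when one more produced slot would collide with csm */
	return ((prd + 1) % entries) == csm;
}
#endif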
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) static irqreturn_t ace_interrupt(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) struct net_device *dev = (struct net_device *)dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) struct ace_private *ap = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) struct ace_regs __iomem *regs = ap->regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) u32 idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) u32 txcsm, rxretcsm, rxretprd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) u32 evtcsm, evtprd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) * In case of PCI shared interrupts or spurious interrupts,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) * we want to make sure it is actually our interrupt before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) * spending any time in here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) */
	if (!(readl(&regs->HostCtrl) & IN_INT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) return IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112)
	/*
	 * ACK intr now. Otherwise we will lose updates to rx_ret_prd,
	 * which happened _after_ rxretprd = *ap->rx_ret_prd; but before
	 * writel(0, &regs->Mb0Lo).
	 *
	 * The "IRQ avoidance" recommended in the docs applies only to
	 * IRQs served by threads, and it is wrong even for that case.
	 */
	writel(0, &regs->Mb0Lo);
	readl(&regs->Mb0Lo);
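	/*
	 * The readl() above forces the posted write out of the PCI
	 * write buffers, so the ACK reaches the NIC before we sample
	 * any of the producer indices below.
	 */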
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) * There is no conflict between transmit handling in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) * start_xmit and receive processing, thus there is no reason
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) * to take a spin lock for RX handling. Wait until we start
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) * working on the other stuff - hey we don't need a spin lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) * anymore.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) rxretprd = *ap->rx_ret_prd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) rxretcsm = ap->cur_rx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) if (rxretprd != rxretcsm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) ace_rx_int(dev, rxretprd, rxretcsm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) txcsm = *ap->tx_csm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) idx = ap->tx_ret_csm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) if (txcsm != idx) {
		/*
		 * If each skb takes only one descriptor this check
		 * degenerates to identity, because new space has just
		 * been opened. But if skbs are fragmented we must check
		 * that this index update releases enough space,
		 * otherwise we just wait for the device to complete
		 * more work.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) if (!tx_ring_full(ap, txcsm, ap->tx_prd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) ace_tx_int(dev, txcsm, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151)
	evtcsm = readl(&regs->EvtCsm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) evtprd = *ap->evt_prd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) if (evtcsm != evtprd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) evtcsm = ace_handle_event(dev, evtcsm, evtprd);
		writel(evtcsm, &regs->EvtCsm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) * This has to go last in the interrupt handler and run with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) * the spin lock released ... what lock?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) if (netif_running(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) int cur_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) int run_tasklet = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167)
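		/*
		 * Two watermarks per ring: below the PANIC threshold
		 * the ring is refilled right here in irq context;
		 * merely below the LOW threshold (or when a refill is
		 * already in flight) the work is deferred to the
		 * tasklet.
		 */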
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) cur_size = atomic_read(&ap->cur_rx_bufs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) if (cur_size < RX_LOW_STD_THRES) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) if ((cur_size < RX_PANIC_STD_THRES) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) !test_and_set_bit(0, &ap->std_refill_busy)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) #ifdef DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) printk("low on std buffers %i\n", cur_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) ace_load_std_rx_ring(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) RX_RING_SIZE - cur_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) run_tasklet = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) if (!ACE_IS_TIGON_I(ap)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) cur_size = atomic_read(&ap->cur_mini_bufs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) if (cur_size < RX_LOW_MINI_THRES) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) if ((cur_size < RX_PANIC_MINI_THRES) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) !test_and_set_bit(0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) &ap->mini_refill_busy)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) #ifdef DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) printk("low on mini buffers %i\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) cur_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) ace_load_mini_rx_ring(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) RX_MINI_SIZE - cur_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) run_tasklet = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) if (ap->jumbo) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) cur_size = atomic_read(&ap->cur_jumbo_bufs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) if (cur_size < RX_LOW_JUMBO_THRES) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) if ((cur_size < RX_PANIC_JUMBO_THRES) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) !test_and_set_bit(0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) &ap->jumbo_refill_busy)){
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) #ifdef DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) printk("low on jumbo buffers %i\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) cur_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) ace_load_jumbo_rx_ring(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) RX_JUMBO_SIZE - cur_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) run_tasklet = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) if (run_tasklet && !ap->tasklet_pending) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) ap->tasklet_pending = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) tasklet_schedule(&ap->ace_tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) static int ace_open(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) struct ace_private *ap = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) struct ace_regs __iomem *regs = ap->regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) struct cmd cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) if (!(ap->fw_running)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) printk(KERN_WARNING "%s: Firmware not running!\n", dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233)
	writel(dev->mtu + ETH_HLEN + 4, &regs->IfMtu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) cmd.evt = C_CLEAR_STATS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) cmd.code = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) cmd.idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) ace_issue_cmd(regs, &cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) cmd.evt = C_HOST_STATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) cmd.code = C_C_STACK_UP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) cmd.idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) ace_issue_cmd(regs, &cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) if (ap->jumbo &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) !test_and_set_bit(0, &ap->jumbo_refill_busy))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) ace_load_jumbo_rx_ring(dev, RX_JUMBO_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) if (dev->flags & IFF_PROMISC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) cmd.evt = C_SET_PROMISC_MODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) cmd.code = C_C_PROMISC_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) cmd.idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) ace_issue_cmd(regs, &cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) ap->promisc = 1;
	} else
		ap->promisc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) ap->mcast_all = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) #if 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) cmd.evt = C_LNK_NEGOTIATION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) cmd.code = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) cmd.idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) ace_issue_cmd(regs, &cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) netif_start_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) * Setup the bottom half rx ring refill handler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) tasklet_setup(&ap->ace_tasklet, ace_tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) }
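
/*
 * Every command above is a hand-built evt/code/idx triple fed to
 * ace_issue_cmd(). A hypothetical helper (a sketch, not part of the
 * driver) that would collapse that pattern:
 */
#if 0
static void ace_issue_simple_cmd(struct ace_regs __iomem *regs,
				 int evt, int code, int idx)
{
	struct cmd cmd;

	cmd.evt = evt;
	cmd.code = code;
	cmd.idx = idx;
	ace_issue_cmd(regs, &cmd);
}
#endif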
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) static int ace_close(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) struct ace_private *ap = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) struct ace_regs __iomem *regs = ap->regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) struct cmd cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) short i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285)
	/*
	 * Without (or before) releasing the irq and stopping the
	 * hardware, this is absolute nonsense, by the way. It will be
	 * undone instantly by the first irq.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) netif_stop_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) if (ap->promisc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) cmd.evt = C_SET_PROMISC_MODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) cmd.code = C_C_PROMISC_DISABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) cmd.idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) ace_issue_cmd(regs, &cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) ap->promisc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) cmd.evt = C_HOST_STATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) cmd.code = C_C_STACK_DOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) cmd.idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) ace_issue_cmd(regs, &cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) tasklet_kill(&ap->ace_tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) * Make sure one CPU is not processing packets while
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) * buffers are being released by another.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) local_irq_save(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) ace_mask_irq(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) for (i = 0; i < ACE_TX_RING_ENTRIES(ap); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) struct tx_ring_info *info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) info = ap->skb->tx_skbuff + i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) skb = info->skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) if (dma_unmap_len(info, maplen)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) if (ACE_IS_TIGON_I(ap)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) /* NB: TIGON_1 is special, tx_ring is in io space */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) struct tx_desc __iomem *tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) tx = (__force struct tx_desc __iomem *) &ap->tx_ring[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) writel(0, &tx->addr.addrhi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) writel(0, &tx->addr.addrlo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) writel(0, &tx->flagsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) memset(ap->tx_ring + i, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) sizeof(struct tx_desc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) dma_unmap_page(&ap->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) dma_unmap_addr(info, mapping),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) dma_unmap_len(info, maplen),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) dma_unmap_len_set(info, maplen, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) if (skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) dev_kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) info->skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) if (ap->jumbo) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) cmd.evt = C_RESET_JUMBO_RNG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) cmd.code = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) cmd.idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) ace_issue_cmd(regs, &cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) ace_unmask_irq(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) local_irq_restore(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360)
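/*
 * Map the linear data area of a tx skb for DMA and record the unmap
 * info. Note that 'tail', not 'skb', is stored as the owning skb:
 * only the entry that completes a packet should own it, so it is
 * freed exactly once in ace_tx_int().
 */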
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) static inline dma_addr_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) ace_map_tx_skb(struct ace_private *ap, struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) struct sk_buff *tail, u32 idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) dma_addr_t mapping;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) struct tx_ring_info *info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) mapping = dma_map_page(&ap->pdev->dev, virt_to_page(skb->data),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) offset_in_page(skb->data), skb->len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) info = ap->skb->tx_skbuff + idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) info->skb = tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) dma_unmap_addr_set(info, mapping, mapping);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) dma_unmap_len_set(info, maplen, skb->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) return mapping;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379)
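/*
 * Fill in one TX buffer descriptor: the 64-bit DMA address is split
 * into high/low words, and flagsize carries the buffer length in its
 * upper 16 bits with BD_FLG_* bits below. On Tigon I the TX ring
 * lives in NIC io space and must be written with writel().
 */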
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) static inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) ace_load_tx_bd(struct ace_private *ap, struct tx_desc *desc, u64 addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) u32 flagsize, u32 vlan_tag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) #if !USE_TX_COAL_NOW
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) flagsize &= ~BD_FLG_COAL_NOW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) if (ACE_IS_TIGON_I(ap)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) struct tx_desc __iomem *io = (__force struct tx_desc __iomem *) desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) writel(addr >> 32, &io->addr.addrhi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) writel(addr & 0xffffffff, &io->addr.addrlo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) writel(flagsize, &io->flagsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) writel(vlan_tag, &io->vlanres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) desc->addr.addrhi = addr >> 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) desc->addr.addrlo = addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) desc->flagsize = flagsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) desc->vlanres = vlan_tag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) static netdev_tx_t ace_start_xmit(struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) struct ace_private *ap = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) struct ace_regs __iomem *regs = ap->regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) struct tx_desc *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) u32 idx, flagsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) unsigned long maxjiff = jiffies + 3*HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) restart:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) idx = ap->tx_prd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) if (tx_ring_full(ap, ap->tx_ret_csm, idx))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) goto overflow;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) if (!skb_shinfo(skb)->nr_frags) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) dma_addr_t mapping;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) u32 vlan_tag = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) mapping = ace_map_tx_skb(ap, skb, skb, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) flagsize = (skb->len << 16) | (BD_FLG_END);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) if (skb->ip_summed == CHECKSUM_PARTIAL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) flagsize |= BD_FLG_TCP_UDP_SUM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) if (skb_vlan_tag_present(skb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) flagsize |= BD_FLG_VLAN_TAG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) vlan_tag = skb_vlan_tag_get(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) desc = ap->tx_ring + idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) idx = (idx + 1) % ACE_TX_RING_ENTRIES(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432)
		/*
		 * Ring is about to fill: ask for an immediate TX
		 * completion interrupt. Look at ace_tx_int for
		 * explanations.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) if (tx_ring_full(ap, ap->tx_ret_csm, idx))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) flagsize |= BD_FLG_COAL_NOW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) ace_load_tx_bd(ap, desc, mapping, flagsize, vlan_tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) dma_addr_t mapping;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) u32 vlan_tag = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) int i, len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) mapping = ace_map_tx_skb(ap, skb, NULL, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) flagsize = (skb_headlen(skb) << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) if (skb->ip_summed == CHECKSUM_PARTIAL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) flagsize |= BD_FLG_TCP_UDP_SUM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) if (skb_vlan_tag_present(skb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) flagsize |= BD_FLG_VLAN_TAG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) vlan_tag = skb_vlan_tag_get(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) ace_load_tx_bd(ap, ap->tx_ring + idx, mapping, flagsize, vlan_tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) idx = (idx + 1) % ACE_TX_RING_ENTRIES(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) struct tx_ring_info *info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) len += skb_frag_size(frag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) info = ap->skb->tx_skbuff + idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) desc = ap->tx_ring + idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) mapping = skb_frag_dma_map(&ap->pdev->dev, frag, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) skb_frag_size(frag),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) flagsize = skb_frag_size(frag) << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) if (skb->ip_summed == CHECKSUM_PARTIAL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) flagsize |= BD_FLG_TCP_UDP_SUM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) idx = (idx + 1) % ACE_TX_RING_ENTRIES(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) if (i == skb_shinfo(skb)->nr_frags - 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) flagsize |= BD_FLG_END;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) if (tx_ring_full(ap, ap->tx_ret_csm, idx))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) flagsize |= BD_FLG_COAL_NOW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) * Only the last fragment frees
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) * the skb!
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) info->skb = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) info->skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) dma_unmap_addr_set(info, mapping, mapping);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) dma_unmap_len_set(info, maplen, skb_frag_size(frag));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) ace_load_tx_bd(ap, desc, mapping, flagsize, vlan_tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) ap->tx_prd = idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) ace_set_txprd(regs, ap, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) if (flagsize & BD_FLG_COAL_NOW) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) netif_stop_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498)
		/*
		 * A TX completion interrupt might have slipped in
		 * between, freeing ring entries again. Since xmit is
		 * serialized, this is the only situation we have to
		 * re-test.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) if (!tx_ring_full(ap, ap->tx_ret_csm, idx))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) netif_wake_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) return NETDEV_TX_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) overflow:
	/*
	 * This race condition is unavoidable with lock-free drivers.
	 * We wake up the queue _before_ tx_prd is advanced, so we can
	 * enter hard_start_xmit too early, while the tx ring still
	 * looks full. This happens ~1-4 times per 100000 packets,
	 * which is rare enough that we can afford to loop here,
	 * syncing with the other CPU. Probably, we need an additional
	 * wmb() in ace_tx_int as well.
	 *
	 * Note that this race is relieved by reserving one more entry
	 * in the tx ring than is strictly necessary (see the original
	 * non-SG driver). However, with SG we would need to reserve
	 * 2*MAX_SKB_FRAGS+1 entries, which is already overkill.
	 *
	 * The alternative is to return NETDEV_TX_BUSY without
	 * throttling the queue; that only makes the retry loop longer,
	 * with no additional useful effect.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) if (time_before(jiffies, maxjiff)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) barrier();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) cpu_relax();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) goto restart;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) /* The ring is stuck full. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) printk(KERN_WARNING "%s: Transmit ring stuck full\n", dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) return NETDEV_TX_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) static int ace_change_mtu(struct net_device *dev, int new_mtu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) struct ace_private *ap = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) struct ace_regs __iomem *regs = ap->regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544)
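	/*
	 * Tell the NIC the new maximum frame length; the extra 4 bytes
	 * beyond the Ethernet header presumably leave room for a VLAN
	 * tag.
	 */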
	writel(new_mtu + ETH_HLEN + 4, &regs->IfMtu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) dev->mtu = new_mtu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) if (new_mtu > ACE_STD_MTU) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) if (!(ap->jumbo)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) printk(KERN_INFO "%s: Enabling Jumbo frame "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) "support\n", dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) ap->jumbo = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) if (!test_and_set_bit(0, &ap->jumbo_refill_busy))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) ace_load_jumbo_rx_ring(dev, RX_JUMBO_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) ace_set_rxtx_parms(dev, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) } else {
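		/*
		 * Wait for any in-flight jumbo ring refill to finish
		 * before tearing the jumbo ring down.
		 */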
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) while (test_and_set_bit(0, &ap->jumbo_refill_busy));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) ace_sync_irq(dev->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) ace_set_rxtx_parms(dev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) if (ap->jumbo) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) struct cmd cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) cmd.evt = C_RESET_JUMBO_RNG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) cmd.code = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) cmd.idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) ace_issue_cmd(regs, &cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573)
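/*
 * ethtool link queries are answered from the firmware's link state
 * registers: GigLnkState reports gigabit, FastLnkState 10/100.
 */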
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) static int ace_get_link_ksettings(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) struct ethtool_link_ksettings *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) struct ace_private *ap = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) struct ace_regs __iomem *regs = ap->regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) u32 link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) u32 supported;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) memset(cmd, 0, sizeof(struct ethtool_link_ksettings));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) supported = (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) SUPPORTED_Autoneg | SUPPORTED_FIBRE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) cmd->base.port = PORT_FIBRE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590)
	link = readl(&regs->GigLnkState);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) if (link & LNK_1000MB) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) cmd->base.speed = SPEED_1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) } else {
		link = readl(&regs->FastLnkState);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) if (link & LNK_100MB)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) cmd->base.speed = SPEED_100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) else if (link & LNK_10MB)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) cmd->base.speed = SPEED_10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) cmd->base.speed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) if (link & LNK_FULL_DUPLEX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) cmd->base.duplex = DUPLEX_FULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) cmd->base.duplex = DUPLEX_HALF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) if (link & LNK_NEGOTIATE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) cmd->base.autoneg = AUTONEG_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) cmd->base.autoneg = AUTONEG_DISABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) #if 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) * Current struct ethtool_cmd is insufficient
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) */
	ecmd->trace = readl(&regs->TuneTrace);

	ecmd->txcoal = readl(&regs->TuneTxCoalTicks);
	ecmd->rxcoal = readl(&regs->TuneRxCoalTicks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) supported);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) static int ace_set_link_ksettings(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) const struct ethtool_link_ksettings *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) struct ace_private *ap = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) struct ace_regs __iomem *regs = ap->regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) u32 link, speed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635)
	link = readl(&regs->GigLnkState);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) if (link & LNK_1000MB)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) speed = SPEED_1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) else {
		link = readl(&regs->FastLnkState);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) if (link & LNK_100MB)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) speed = SPEED_100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) else if (link & LNK_10MB)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) speed = SPEED_10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) speed = SPEED_100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) link = LNK_ENABLE | LNK_1000MB | LNK_100MB | LNK_10MB |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) LNK_RX_FLOW_CTL_Y | LNK_NEG_FCTL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) if (!ACE_IS_TIGON_I(ap))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) link |= LNK_TX_FLOW_CTL_Y;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) if (cmd->base.autoneg == AUTONEG_ENABLE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) link |= LNK_NEGOTIATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) if (cmd->base.speed != speed) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) link &= ~(LNK_1000MB | LNK_100MB | LNK_10MB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) switch (cmd->base.speed) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) case SPEED_1000:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) link |= LNK_1000MB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) case SPEED_100:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) link |= LNK_100MB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) case SPEED_10:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) link |= LNK_10MB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) if (cmd->base.duplex == DUPLEX_FULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) link |= LNK_FULL_DUPLEX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) if (link != ap->link) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) struct cmd cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) printk(KERN_INFO "%s: Renegotiating link state\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) ap->link = link;
		writel(link, &regs->TuneLink);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) if (!ACE_IS_TIGON_I(ap))
			writel(link, &regs->TuneFastLink);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) cmd.evt = C_LNK_NEGOTIATION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) cmd.code = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) cmd.idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) ace_issue_cmd(regs, &cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) static void ace_get_drvinfo(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) struct ethtool_drvinfo *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) struct ace_private *ap = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) strlcpy(info->driver, "acenic", sizeof(info->driver));
	snprintf(info->fw_version, sizeof(info->fw_version), "%i.%i.%i",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) ap->firmware_major, ap->firmware_minor, ap->firmware_fix);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) if (ap->pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) strlcpy(info->bus_info, pci_name(ap->pdev),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) sizeof(info->bus_info));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) * Set the hardware MAC address.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) static int ace_set_mac_addr(struct net_device *dev, void *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) struct ace_private *ap = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) struct ace_regs __iomem *regs = ap->regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) struct sockaddr *addr=p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) u8 *da;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) struct cmd cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) if(netif_running(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) memcpy(dev->dev_addr, addr->sa_data,dev->addr_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) da = (u8 *)dev->dev_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724)
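	/*
	 * The station address is split across two registers: the top
	 * two bytes go into MacAddrHi, the remaining four into
	 * MacAddrLo.
	 */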
	writel(da[0] << 8 | da[1], &regs->MacAddrHi);
	writel((da[2] << 24) | (da[3] << 16) | (da[4] << 8) | da[5],
	       &regs->MacAddrLo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) cmd.evt = C_SET_MAC_ADDR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) cmd.code = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) cmd.idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) ace_issue_cmd(regs, &cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) static void ace_set_multicast_list(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) struct ace_private *ap = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) struct ace_regs __iomem *regs = ap->regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) struct cmd cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) if ((dev->flags & IFF_ALLMULTI) && !(ap->mcast_all)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) cmd.evt = C_SET_MULTICAST_MODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) cmd.code = C_C_MCAST_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) cmd.idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) ace_issue_cmd(regs, &cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) ap->mcast_all = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) } else if (ap->mcast_all) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) cmd.evt = C_SET_MULTICAST_MODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) cmd.code = C_C_MCAST_DISABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) cmd.idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) ace_issue_cmd(regs, &cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) ap->mcast_all = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) if ((dev->flags & IFF_PROMISC) && !(ap->promisc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) cmd.evt = C_SET_PROMISC_MODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) cmd.code = C_C_PROMISC_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) cmd.idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) ace_issue_cmd(regs, &cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) ap->promisc = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) }else if (!(dev->flags & IFF_PROMISC) && (ap->promisc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) cmd.evt = C_SET_PROMISC_MODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) cmd.code = C_C_PROMISC_DISABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) cmd.idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) ace_issue_cmd(regs, &cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) ap->promisc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771)
	/*
	 * For the time being, multicast relies on the upper layers
	 * filtering it properly. The firmware does not allow setting
	 * the entire multicast list at once, and keeping track of it
	 * here would be messy.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) if (!netdev_mc_empty(dev) && !ap->mcast_all) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) cmd.evt = C_SET_MULTICAST_MODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) cmd.code = C_C_MCAST_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) cmd.idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) ace_issue_cmd(regs, &cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) }else if (!ap->mcast_all) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) cmd.evt = C_SET_MULTICAST_MODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) cmd.code = C_C_MCAST_DISABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) cmd.idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) ace_issue_cmd(regs, &cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791)
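/*
 * The MAC statistics live in a block of NIC shared memory; pick out
 * the few counters that map onto the generic netdev stats.
 */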
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) static struct net_device_stats *ace_get_stats(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) struct ace_private *ap = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) struct ace_mac_stats __iomem *mac_stats =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) (struct ace_mac_stats __iomem *)ap->regs->Stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) dev->stats.rx_missed_errors = readl(&mac_stats->drop_space);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) dev->stats.multicast = readl(&mac_stats->kept_mc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) dev->stats.collisions = readl(&mac_stats->coll);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) return &dev->stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805)
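/*
 * Copy a firmware image into NIC SRAM through the shared-memory
 * window: WinBase selects which ACE_WINDOW_SIZE-aligned chunk of
 * SRAM appears at regs->Window, and tsize is clamped so that each
 * burst stops at the window boundary.
 */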
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) static void ace_copy(struct ace_regs __iomem *regs, const __be32 *src,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) u32 dest, int size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) void __iomem *tdest;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) short tsize, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) if (size <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) while (size > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) tsize = min_t(u32, ((~dest & (ACE_WINDOW_SIZE - 1)) + 1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) min_t(u32, size, ACE_WINDOW_SIZE));
		tdest = (void __iomem *) &regs->Window +
			(dest & (ACE_WINDOW_SIZE - 1));
		writel(dest & ~(ACE_WINDOW_SIZE - 1), &regs->WinBase);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) for (i = 0; i < (tsize / 4); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) /* Firmware is big-endian */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) writel(be32_to_cpup(src), tdest);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) src++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) tdest += 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) dest += 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) size -= 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) static void ace_clear(struct ace_regs __iomem *regs, u32 dest, int size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) void __iomem *tdest;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) short tsize = 0, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) if (size <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) while (size > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) tsize = min_t(u32, ((~dest & (ACE_WINDOW_SIZE - 1)) + 1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) min_t(u32, size, ACE_WINDOW_SIZE));
		tdest = (void __iomem *) &regs->Window +
			(dest & (ACE_WINDOW_SIZE - 1));
		writel(dest & ~(ACE_WINDOW_SIZE - 1), &regs->WinBase);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) for (i = 0; i < (tsize / 4); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) writel(0, tdest + i*4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) dest += tsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) size -= tsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) * Download the firmware into the SRAM on the NIC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) * This operation requires the NIC to be halted and is performed with
 * interrupts disabled and with the spinlock held.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) static int ace_load_firmware(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) const struct firmware *fw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) const char *fw_name = "acenic/tg2.bin";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) struct ace_private *ap = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) struct ace_regs __iomem *regs = ap->regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) const __be32 *fw_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) u32 load_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873)
	if (!(readl(&regs->CpuCtrl) & CPU_HALTED)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) printk(KERN_ERR "%s: trying to download firmware while the "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) "CPU is running!\n", ap->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) if (ACE_IS_TIGON_I(ap))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) fw_name = "acenic/tg1.bin";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) ret = request_firmware(&fw, fw_name, &ap->pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) printk(KERN_ERR "%s: Failed to load firmware \"%s\"\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) ap->name, fw_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) fw_data = (void *)fw->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) /* Firmware blob starts with version numbers, followed by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) load and start address. Remainder is the blob to be loaded
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) contiguously from load address. We don't bother to represent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) the BSS/SBSS sections any more, since we were clearing the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) whole thing anyway. */
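	/*
	 * Blob layout, as consumed below (32-bit big-endian words):
	 *   word 0: the three version bytes (major, minor, fix)
	 *   word 1: start (entry) address
	 *   word 2: load address
	 *   word 3...: image data, fw->size - 12 bytes in total
	 */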
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) ap->firmware_major = fw->data[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) ap->firmware_minor = fw->data[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) ap->firmware_fix = fw->data[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) ap->firmware_start = be32_to_cpu(fw_data[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) if (ap->firmware_start < 0x4000 || ap->firmware_start >= 0x80000) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) printk(KERN_ERR "%s: bogus load address %08x in \"%s\"\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) ap->name, ap->firmware_start, fw_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) load_addr = be32_to_cpu(fw_data[2]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) if (load_addr < 0x4000 || load_addr >= 0x80000) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) printk(KERN_ERR "%s: bogus load address %08x in \"%s\"\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912) ap->name, load_addr, fw_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) * Do not try to clear more than 512KiB or we end up seeing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) * funny things on NICs with only 512KiB SRAM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) ace_clear(regs, 0x2000, 0x80000-0x2000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922) ace_copy(regs, &fw_data[3], load_addr, fw->size-12);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) release_firmware(fw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) * The eeprom on the AceNIC is an Atmel i2c EEPROM.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) * Accessing the EEPROM is `interesting' to say the least - don't read
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) * this code right after dinner.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) * This is all about black magic and bit-banging the device .... I
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) * wonder in what hospital they have put the guy who designed the i2c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) * specs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) * Oh yes, this is only the beginning!
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) *
 * Thanks to Stevarino Webinski for helping track down the bugs in
 * the i2c readout code by beta testing all my hacks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) */
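/*
 * Generate an i2c START condition: the data line falls while the
 * clock is held high.
 */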
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) static void eeprom_start(struct ace_regs __iomem *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946) u32 local;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947)
	readl(&regs->LocalCtrl);
	udelay(ACE_SHORT_DELAY);
	local = readl(&regs->LocalCtrl);
	local |= EEPROM_DATA_OUT | EEPROM_WRITE_ENABLE;
	writel(local, &regs->LocalCtrl);
	readl(&regs->LocalCtrl);
	mb();
	udelay(ACE_SHORT_DELAY);
	local |= EEPROM_CLK_OUT;
	writel(local, &regs->LocalCtrl);
	readl(&regs->LocalCtrl);
	mb();
	udelay(ACE_SHORT_DELAY);
	local &= ~EEPROM_DATA_OUT;
	writel(local, &regs->LocalCtrl);
	readl(&regs->LocalCtrl);
	mb();
	udelay(ACE_SHORT_DELAY);
	local &= ~EEPROM_CLK_OUT;
	writel(local, &regs->LocalCtrl);
	readl(&regs->LocalCtrl);
	mb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972)
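/*
 * Clock out the eight bits of "magic" MSB first: set the data line
 * while the clock is low, then pulse the clock high and low again.
 */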
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) static void eeprom_prep(struct ace_regs __iomem *regs, u8 magic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975) short i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) u32 local;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) udelay(ACE_SHORT_DELAY);
	local = readl(&regs->LocalCtrl);
	local &= ~EEPROM_DATA_OUT;
	local |= EEPROM_WRITE_ENABLE;
	writel(local, &regs->LocalCtrl);
	readl(&regs->LocalCtrl);
	mb();

	for (i = 0; i < 8; i++, magic <<= 1) {
		udelay(ACE_SHORT_DELAY);
		if (magic & 0x80)
			local |= EEPROM_DATA_OUT;
		else
			local &= ~EEPROM_DATA_OUT;
		writel(local, &regs->LocalCtrl);
		readl(&regs->LocalCtrl);
		mb();

		udelay(ACE_SHORT_DELAY);
		local |= EEPROM_CLK_OUT;
		writel(local, &regs->LocalCtrl);
		readl(&regs->LocalCtrl);
		mb();
		udelay(ACE_SHORT_DELAY);
		local &= ~(EEPROM_CLK_OUT | EEPROM_DATA_OUT);
		writel(local, &regs->LocalCtrl);
		readl(&regs->LocalCtrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005) mb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009)
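/*
 * Check for the slave's ACK: release the data line by clearing the
 * write enable, pulse the clock high and sample DATA_IN mid-pulse.
 * Returns 0 on ACK (the device pulls the line low), non-zero on NAK.
 */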
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010) static int eeprom_check_ack(struct ace_regs __iomem *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012) int state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013) u32 local;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014)
	local = readl(&regs->LocalCtrl);
	local &= ~EEPROM_WRITE_ENABLE;
	writel(local, &regs->LocalCtrl);
	readl(&regs->LocalCtrl);
	mb();
	udelay(ACE_LONG_DELAY);
	local |= EEPROM_CLK_OUT;
	writel(local, &regs->LocalCtrl);
	readl(&regs->LocalCtrl);
	mb();
	udelay(ACE_SHORT_DELAY);
	/* sample data in middle of high clk */
	state = (readl(&regs->LocalCtrl) & EEPROM_DATA_IN) != 0;
	udelay(ACE_SHORT_DELAY);
	mb();
	writel(readl(&regs->LocalCtrl) & ~EEPROM_CLK_OUT, &regs->LocalCtrl);
	readl(&regs->LocalCtrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032) mb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034) return state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037)
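/*
 * Generate an i2c STOP condition: the data line rises while the
 * clock is held high.
 */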
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038) static void eeprom_stop(struct ace_regs __iomem *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040) u32 local;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042) udelay(ACE_SHORT_DELAY);
	local = readl(&regs->LocalCtrl);
	local |= EEPROM_WRITE_ENABLE;
	writel(local, &regs->LocalCtrl);
	readl(&regs->LocalCtrl);
	mb();
	udelay(ACE_SHORT_DELAY);
	local &= ~EEPROM_DATA_OUT;
	writel(local, &regs->LocalCtrl);
	readl(&regs->LocalCtrl);
	mb();
	udelay(ACE_SHORT_DELAY);
	local |= EEPROM_CLK_OUT;
	writel(local, &regs->LocalCtrl);
	readl(&regs->LocalCtrl);
	mb();
	udelay(ACE_SHORT_DELAY);
	local |= EEPROM_DATA_OUT;
	writel(local, &regs->LocalCtrl);
	readl(&regs->LocalCtrl);
	mb();
	udelay(ACE_LONG_DELAY);
	local &= ~EEPROM_CLK_OUT;
	writel(local, &regs->LocalCtrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066) mb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069)
/*
 * Read a whole byte from the EEPROM via an i2c "random read":
 * START, dummy write to latch the two address bytes, repeated
 * START, read select, then clock the eight data bits in.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073) static int read_eeprom_byte(struct net_device *dev, unsigned long offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075) struct ace_private *ap = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076) struct ace_regs __iomem *regs = ap->regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078) u32 local;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079) int result = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080) short i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082) /*
	 * Don't take interrupts on this CPU while bit banging
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084) * the %#%#@$ I2C device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) local_irq_save(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088) eeprom_start(regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090) eeprom_prep(regs, EEPROM_WRITE_SELECT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091) if (eeprom_check_ack(regs)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092) local_irq_restore(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093) printk(KERN_ERR "%s: Unable to sync eeprom\n", ap->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094) result = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095) goto eeprom_read_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098) eeprom_prep(regs, (offset >> 8) & 0xff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099) if (eeprom_check_ack(regs)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100) local_irq_restore(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101) printk(KERN_ERR "%s: Unable to set address byte 0\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102) ap->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103) result = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104) goto eeprom_read_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107) eeprom_prep(regs, offset & 0xff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108) if (eeprom_check_ack(regs)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109) local_irq_restore(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110) printk(KERN_ERR "%s: Unable to set address byte 1\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111) ap->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112) result = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113) goto eeprom_read_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116) eeprom_start(regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117) eeprom_prep(regs, EEPROM_READ_SELECT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118) if (eeprom_check_ack(regs)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119) local_irq_restore(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120) printk(KERN_ERR "%s: Unable to set READ_SELECT\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121) ap->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122) result = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123) goto eeprom_read_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125)
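	/*
	 * Clock in the eight data bits, MSB first. On the last bit,
	 * re-enable the data driver so the NAK can be sent below.
	 */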
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126) for (i = 0; i < 8; i++) {
		local = readl(&regs->LocalCtrl);
		local &= ~EEPROM_WRITE_ENABLE;
		writel(local, &regs->LocalCtrl);
		readl(&regs->LocalCtrl);
		udelay(ACE_LONG_DELAY);
		mb();
		local |= EEPROM_CLK_OUT;
		writel(local, &regs->LocalCtrl);
		readl(&regs->LocalCtrl);
		mb();
		udelay(ACE_SHORT_DELAY);
		/* sample data mid high clk */
		result = (result << 1) |
			((readl(&regs->LocalCtrl) & EEPROM_DATA_IN) != 0);
		udelay(ACE_SHORT_DELAY);
		mb();
		local = readl(&regs->LocalCtrl);
		local &= ~EEPROM_CLK_OUT;
		writel(local, &regs->LocalCtrl);
		readl(&regs->LocalCtrl);
		udelay(ACE_SHORT_DELAY);
		mb();
		if (i == 7) {
			local |= EEPROM_WRITE_ENABLE;
			writel(local, &regs->LocalCtrl);
			readl(&regs->LocalCtrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153) mb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154) udelay(ACE_SHORT_DELAY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157)
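	/*
	 * Send a NAK (data held high for one clock pulse) to end the
	 * read, then generate the STOP condition.
	 */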
	local |= EEPROM_DATA_OUT;
	writel(local, &regs->LocalCtrl);
	readl(&regs->LocalCtrl);
	mb();
	udelay(ACE_SHORT_DELAY);
	writel(readl(&regs->LocalCtrl) | EEPROM_CLK_OUT, &regs->LocalCtrl);
	readl(&regs->LocalCtrl);
	udelay(ACE_LONG_DELAY);
	writel(readl(&regs->LocalCtrl) & ~EEPROM_CLK_OUT, &regs->LocalCtrl);
	readl(&regs->LocalCtrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168) mb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169) udelay(ACE_SHORT_DELAY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170) eeprom_stop(regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172) local_irq_restore(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174) return result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176) eeprom_read_error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177) printk(KERN_ERR "%s: Unable to read eeprom byte 0x%02lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178) ap->name, offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3180) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182) module_pci_driver(acenic_pci_driver);