^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0+
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /* cassini.c: Sun Microsystems Cassini(+) ethernet driver.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) * Copyright (C) 2004 Sun Microsystems Inc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Copyright (C) 2003 Adrian Sun (asun@darksunrising.com)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) * This driver uses the sungem driver (c) David Miller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) * (davem@redhat.com) as its basis.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) * The cassini chip has a number of features that distinguish it from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) * the gem chip:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) * 4 transmit descriptor rings that are used for either QoS (VLAN) or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) * load balancing (non-VLAN mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) * batching of multiple packets
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) * multiple CPU dispatching
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) * page-based RX descriptor engine with separate completion rings
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) * Gigabit support (GMII and PCS interface)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) * MIF link up/down detection works
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) * RX is handled by page sized buffers that are attached as fragments to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) * the skb. here's what's done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) * -- driver allocates pages at a time and keeps reference counts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) * on them.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) * -- the upper protocol layers assume that the header is in the skb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) * itself. as a result, cassini will copy a small amount (64 bytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) * to make them happy.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) * -- driver appends the rest of the data pages as frags to skbuffs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) * and increments the reference count
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) * -- on page reclamation, the driver swaps the page with a spare page.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) * if that page is still in use, it frees its reference to that page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) * and allocates a new page for use. otherwise, it just recycles the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) * the page.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) * NOTE: cassini can parse the header. however, it's not worth it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) * as long as the network stack requires a header copy.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) * TX has 4 queues. currently these queues are used in a round-robin
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) * fashion for load balancing. They can also be used for QoS. for that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) * to work, however, QoS information needs to be exposed down to the driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) * level so that subqueues get targeted to particular transmit rings.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) * alternatively, the queues can be configured via use of the all-purpose
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) * ioctl.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) * RX DATA: the rx completion ring has all the info, but the rx desc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) * ring has all of the data. RX can conceivably come in under multiple
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) * interrupts, but the INT# assignment needs to be set up properly by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) * the BIOS and conveyed to the driver. PCI BIOSes don't know how to do
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) * that. also, the two descriptor rings are designed to distinguish between
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) * encrypted and non-encrypted packets, but we use them for buffering
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) * instead.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) * by default, the selective clear mask is set up to process rx packets.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) #include <linux/types.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) #include <linux/compiler.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) #include <linux/delay.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) #include <linux/init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) #include <linux/interrupt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) #include <linux/vmalloc.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) #include <linux/ioport.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) #include <linux/pci.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) #include <linux/mm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) #include <linux/highmem.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) #include <linux/list.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) #include <linux/dma-mapping.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) #include <linux/netdevice.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) #include <linux/etherdevice.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) #include <linux/skbuff.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) #include <linux/ethtool.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) #include <linux/crc32.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) #include <linux/random.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) #include <linux/mii.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) #include <linux/ip.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) #include <linux/tcp.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) #include <linux/mutex.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) #include <linux/firmware.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) #include <net/checksum.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) #include <linux/atomic.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) #include <asm/io.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) #include <asm/byteorder.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) #include <linux/uaccess.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) #define cas_page_map(x) kmap_atomic((x))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) #define cas_page_unmap(x) kunmap_atomic((x))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) #define CAS_NCPUS num_online_cpus()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) #define cas_skb_release(x) netif_rx(x)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) /* select which firmware to use */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) #define USE_HP_WORKAROUND
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) #define HP_WORKAROUND_DEFAULT /* select which firmware to use as default */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) #define CAS_HP_ALT_FIRMWARE cas_prog_null /* alternate firmware */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) #include "cassini.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) #define USE_TX_COMPWB /* use completion writeback registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) #define USE_CSMA_CD_PROTO /* standard CSMA/CD */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) #define USE_RX_BLANK /* hw interrupt mitigation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) #undef USE_ENTROPY_DEV /* don't test for entropy device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) /* NOTE: these aren't useable unless PCI interrupts can be assigned.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) * also, we need to make cp->lock finer-grained.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) #undef USE_PCI_INTB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) #undef USE_PCI_INTC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) #undef USE_PCI_INTD
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) #undef USE_QOS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) #undef USE_VPD_DEBUG /* debug vpd information if defined */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) /* rx processing options */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) #define USE_PAGE_ORDER /* specify to allocate large rx pages */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) #define RX_DONT_BATCH 0 /* if 1, don't batch flows */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) #define RX_COPY_ALWAYS 0 /* if 0, use frags */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) #define RX_COPY_MIN 64 /* copy a little to make upper layers happy */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) #undef RX_COUNT_BUFFERS /* define to calculate RX buffer stats */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) #define DRV_MODULE_NAME "cassini"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) #define DRV_MODULE_VERSION "1.6"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) #define DRV_MODULE_RELDATE "21 May 2008"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) #define CAS_DEF_MSG_ENABLE \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) (NETIF_MSG_DRV | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) NETIF_MSG_PROBE | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) NETIF_MSG_LINK | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) NETIF_MSG_TIMER | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) NETIF_MSG_IFDOWN | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) NETIF_MSG_IFUP | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) NETIF_MSG_RX_ERR | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) NETIF_MSG_TX_ERR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) /* length of time before we decide the hardware is borked,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) * and dev->tx_timeout() should be called to fix the problem
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) #define CAS_TX_TIMEOUT (HZ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) #define CAS_LINK_TIMEOUT (22*HZ/10)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) #define CAS_LINK_FAST_TIMEOUT (1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) /* timeout values for state changing. these specify the number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) * of 10us delays to be used before giving up.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) #define STOP_TRIES_PHY 1000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) #define STOP_TRIES 5000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) /* specify a minimum frame size to deal with some fifo issues
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) * max mtu == 2 * page size - ethernet header - 64 - swivel =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) * 2 * page_size - 0x50
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) #define CAS_MIN_FRAME 97
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) #define CAS_1000MB_MIN_FRAME 255
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) #define CAS_MIN_MTU 60
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) #define CAS_MAX_MTU min(((cp->page_size << 1) - 0x50), 9000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) #if 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) * Eliminate these and use separate atomic counters for each, to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) * avoid a race condition.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) #define CAS_RESET_MTU 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) #define CAS_RESET_ALL 2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) #define CAS_RESET_SPARE 3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) static char version[] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) static int cassini_debug = -1; /* -1 == use CAS_DEF_MSG_ENABLE as value */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) static int link_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) MODULE_AUTHOR("Adrian Sun (asun@darksunrising.com)");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) MODULE_DESCRIPTION("Sun Cassini(+) ethernet driver");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) MODULE_LICENSE("GPL");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) MODULE_FIRMWARE("sun/cassini.bin");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) module_param(cassini_debug, int, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) MODULE_PARM_DESC(cassini_debug, "Cassini bitmapped debugging message enable value");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) module_param(link_mode, int, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) MODULE_PARM_DESC(link_mode, "default link mode");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) * Work around for a PCS bug in which the link goes down due to the chip
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) * being confused and never showing a link status of "up."
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) #define DEFAULT_LINKDOWN_TIMEOUT 5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) * Value in seconds, for user input.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) static int linkdown_timeout = DEFAULT_LINKDOWN_TIMEOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) module_param(linkdown_timeout, int, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) MODULE_PARM_DESC(linkdown_timeout,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) "min reset interval in sec. for PCS linkdown issue; disabled if not positive");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) * value in 'ticks' (units used by jiffies). Set when we init the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) * module because 'HZ' in actually a function call on some flavors of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) * Linux. This will default to DEFAULT_LINKDOWN_TIMEOUT * HZ.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) static int link_transition_timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) static u16 link_modes[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) BMCR_ANENABLE, /* 0 : autoneg */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) 0, /* 1 : 10bt half duplex */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) BMCR_SPEED100, /* 2 : 100bt half duplex */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) BMCR_FULLDPLX, /* 3 : 10bt full duplex */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) BMCR_SPEED100|BMCR_FULLDPLX, /* 4 : 100bt full duplex */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) CAS_BMCR_SPEED1000|BMCR_FULLDPLX /* 5 : 1000bt full duplex */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) static const struct pci_device_id cas_pci_tbl[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) { PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_CASSINI,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) { PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SATURN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) { 0, }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) MODULE_DEVICE_TABLE(pci, cas_pci_tbl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) static void cas_set_link_modes(struct cas *cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) static inline void cas_lock_tx(struct cas *cp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) for (i = 0; i < N_TX_RINGS; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) spin_lock_nested(&cp->tx_lock[i], i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) /* WTZ: QA was finding deadlock problems with the previous
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) * versions after long test runs with multiple cards per machine.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) * See if replacing cas_lock_all with safer versions helps. The
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) * symptoms QA is reporting match those we'd expect if interrupts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) * aren't being properly restored, and we fixed a previous deadlock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) * with similar symptoms by using save/restore versions in other
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) * places.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) #define cas_lock_all_save(cp, flags) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) do { \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) struct cas *xxxcp = (cp); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) spin_lock_irqsave(&xxxcp->lock, flags); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) cas_lock_tx(xxxcp); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) } while (0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) static inline void cas_unlock_tx(struct cas *cp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) for (i = N_TX_RINGS; i > 0; i--)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) spin_unlock(&cp->tx_lock[i - 1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) #define cas_unlock_all_restore(cp, flags) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) do { \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) struct cas *xxxcp = (cp); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) cas_unlock_tx(xxxcp); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) spin_unlock_irqrestore(&xxxcp->lock, flags); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) } while (0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) static void cas_disable_irq(struct cas *cp, const int ring)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) /* Make sure we won't get any more interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) if (ring == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) writel(0xFFFFFFFF, cp->regs + REG_INTR_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) /* disable completion interrupts and selectively mask */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) switch (ring) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) #if defined (USE_PCI_INTB) || defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) #ifdef USE_PCI_INTB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) #ifdef USE_PCI_INTC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) #ifdef USE_PCI_INTD
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) case 3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) writel(INTRN_MASK_CLEAR_ALL | INTRN_MASK_RX_EN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) cp->regs + REG_PLUS_INTRN_MASK(ring));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) writel(INTRN_MASK_CLEAR_ALL, cp->regs +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) REG_PLUS_INTRN_MASK(ring));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) static inline void cas_mask_intr(struct cas *cp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) for (i = 0; i < N_RX_COMP_RINGS; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) cas_disable_irq(cp, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) static void cas_enable_irq(struct cas *cp, const int ring)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) if (ring == 0) { /* all but TX_DONE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) writel(INTR_TX_DONE, cp->regs + REG_INTR_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) switch (ring) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) #if defined (USE_PCI_INTB) || defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) #ifdef USE_PCI_INTB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) #ifdef USE_PCI_INTC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) #ifdef USE_PCI_INTD
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) case 3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) writel(INTRN_MASK_RX_EN, cp->regs +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) REG_PLUS_INTRN_MASK(ring));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) static inline void cas_unmask_intr(struct cas *cp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) for (i = 0; i < N_RX_COMP_RINGS; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) cas_enable_irq(cp, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) static inline void cas_entropy_gather(struct cas *cp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) #ifdef USE_ENTROPY_DEV
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) if ((cp->cas_flags & CAS_FLAG_ENTROPY_DEV) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) batch_entropy_store(readl(cp->regs + REG_ENTROPY_IV),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) readl(cp->regs + REG_ENTROPY_IV),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) sizeof(uint64_t)*8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) static inline void cas_entropy_reset(struct cas *cp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) #ifdef USE_ENTROPY_DEV
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) if ((cp->cas_flags & CAS_FLAG_ENTROPY_DEV) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) writel(BIM_LOCAL_DEV_PAD | BIM_LOCAL_DEV_PROM | BIM_LOCAL_DEV_EXT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) cp->regs + REG_BIM_LOCAL_DEV_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) writeb(ENTROPY_RESET_STC_MODE, cp->regs + REG_ENTROPY_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) writeb(0x55, cp->regs + REG_ENTROPY_RAND_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) /* if we read back 0x0, we don't have an entropy device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) if (readb(cp->regs + REG_ENTROPY_RAND_REG) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) cp->cas_flags &= ~CAS_FLAG_ENTROPY_DEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) /* access to the phy. the following assumes that we've initialized the MIF to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) * be in frame rather than bit-bang mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) static u16 cas_phy_read(struct cas *cp, int reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) u32 cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) int limit = STOP_TRIES_PHY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) cmd = MIF_FRAME_ST | MIF_FRAME_OP_READ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) cmd |= CAS_BASE(MIF_FRAME_PHY_ADDR, cp->phy_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) cmd |= CAS_BASE(MIF_FRAME_REG_ADDR, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) cmd |= MIF_FRAME_TURN_AROUND_MSB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) writel(cmd, cp->regs + REG_MIF_FRAME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) /* poll for completion */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) while (limit-- > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) udelay(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) cmd = readl(cp->regs + REG_MIF_FRAME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) if (cmd & MIF_FRAME_TURN_AROUND_LSB)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) return cmd & MIF_FRAME_DATA_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) return 0xFFFF; /* -1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) static int cas_phy_write(struct cas *cp, int reg, u16 val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) int limit = STOP_TRIES_PHY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) u32 cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) cmd = MIF_FRAME_ST | MIF_FRAME_OP_WRITE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) cmd |= CAS_BASE(MIF_FRAME_PHY_ADDR, cp->phy_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) cmd |= CAS_BASE(MIF_FRAME_REG_ADDR, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) cmd |= MIF_FRAME_TURN_AROUND_MSB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) cmd |= val & MIF_FRAME_DATA_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) writel(cmd, cp->regs + REG_MIF_FRAME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) /* poll for completion */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) while (limit-- > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) udelay(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) cmd = readl(cp->regs + REG_MIF_FRAME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) if (cmd & MIF_FRAME_TURN_AROUND_LSB)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) static void cas_phy_powerup(struct cas *cp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) u16 ctl = cas_phy_read(cp, MII_BMCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) if ((ctl & BMCR_PDOWN) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) ctl &= ~BMCR_PDOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) cas_phy_write(cp, MII_BMCR, ctl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) static void cas_phy_powerdown(struct cas *cp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) u16 ctl = cas_phy_read(cp, MII_BMCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) if (ctl & BMCR_PDOWN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) ctl |= BMCR_PDOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) cas_phy_write(cp, MII_BMCR, ctl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) /* cp->lock held. note: the last put_page will free the buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) static int cas_page_free(struct cas *cp, cas_page_t *page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) dma_unmap_page(&cp->pdev->dev, page->dma_addr, cp->page_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) __free_pages(page->buffer, cp->page_order);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) kfree(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) #ifdef RX_COUNT_BUFFERS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) #define RX_USED_ADD(x, y) ((x)->used += (y))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) #define RX_USED_SET(x, y) ((x)->used = (y))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) #define RX_USED_ADD(x, y) do { } while(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) #define RX_USED_SET(x, y) do { } while(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) /* local page allocation routines for the receive buffers. jumbo pages
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) * require at least 8K contiguous and 8K aligned buffers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) static cas_page_t *cas_page_alloc(struct cas *cp, const gfp_t flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) cas_page_t *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) page = kmalloc(sizeof(cas_page_t), flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) if (!page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) INIT_LIST_HEAD(&page->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) RX_USED_SET(page, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) page->buffer = alloc_pages(flags, cp->page_order);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) if (!page->buffer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) goto page_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) page->dma_addr = dma_map_page(&cp->pdev->dev, page->buffer, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) cp->page_size, DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) return page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) page_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) kfree(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) /* initialize spare pool of rx buffers, but allocate during the open */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) static void cas_spare_init(struct cas *cp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) spin_lock(&cp->rx_inuse_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) INIT_LIST_HEAD(&cp->rx_inuse_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) spin_unlock(&cp->rx_inuse_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) spin_lock(&cp->rx_spare_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) INIT_LIST_HEAD(&cp->rx_spare_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) cp->rx_spares_needed = RX_SPARE_COUNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) spin_unlock(&cp->rx_spare_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) /* used on close. free all the spare buffers. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) static void cas_spare_free(struct cas *cp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) struct list_head list, *elem, *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) /* free spare buffers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) INIT_LIST_HEAD(&list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) spin_lock(&cp->rx_spare_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) list_splice_init(&cp->rx_spare_list, &list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) spin_unlock(&cp->rx_spare_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) list_for_each_safe(elem, tmp, &list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) cas_page_free(cp, list_entry(elem, cas_page_t, list));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) INIT_LIST_HEAD(&list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) #if 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) * Looks like Adrian had protected this with a different
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) * lock than used everywhere else to manipulate this list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) spin_lock(&cp->rx_inuse_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) list_splice_init(&cp->rx_inuse_list, &list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) spin_unlock(&cp->rx_inuse_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) spin_lock(&cp->rx_spare_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) list_splice_init(&cp->rx_inuse_list, &list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) spin_unlock(&cp->rx_spare_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) list_for_each_safe(elem, tmp, &list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) cas_page_free(cp, list_entry(elem, cas_page_t, list));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) /* replenish spares if needed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) static void cas_spare_recover(struct cas *cp, const gfp_t flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) struct list_head list, *elem, *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) int needed, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) /* check inuse list. if we don't need any more free buffers,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) * just free it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) /* make a local copy of the list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) INIT_LIST_HEAD(&list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) spin_lock(&cp->rx_inuse_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) list_splice_init(&cp->rx_inuse_list, &list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) spin_unlock(&cp->rx_inuse_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) list_for_each_safe(elem, tmp, &list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) cas_page_t *page = list_entry(elem, cas_page_t, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) * With the lockless pagecache, cassini buffering scheme gets
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) * slightly less accurate: we might find that a page has an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) * elevated reference count here, due to a speculative ref,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) * and skip it as in-use. Ideally we would be able to reclaim
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) * it. However this would be such a rare case, it doesn't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) * matter too much as we should pick it up the next time round.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) * Importantly, if we find that the page has a refcount of 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) * here (our refcount), then we know it is definitely not inuse
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) * so we can reuse it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) if (page_count(page->buffer) > 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) list_del(elem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) spin_lock(&cp->rx_spare_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) if (cp->rx_spares_needed > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) list_add(elem, &cp->rx_spare_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) cp->rx_spares_needed--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) spin_unlock(&cp->rx_spare_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) spin_unlock(&cp->rx_spare_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) cas_page_free(cp, page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) /* put any inuse buffers back on the list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) if (!list_empty(&list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) spin_lock(&cp->rx_inuse_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) list_splice(&list, &cp->rx_inuse_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) spin_unlock(&cp->rx_inuse_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) spin_lock(&cp->rx_spare_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) needed = cp->rx_spares_needed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) spin_unlock(&cp->rx_spare_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) if (!needed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) /* we still need spares, so try to allocate some */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) INIT_LIST_HEAD(&list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) while (i < needed) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) cas_page_t *spare = cas_page_alloc(cp, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) if (!spare)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) list_add(&spare->list, &list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) i++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) spin_lock(&cp->rx_spare_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) list_splice(&list, &cp->rx_spare_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) cp->rx_spares_needed -= i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) spin_unlock(&cp->rx_spare_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) /* pull a page from the list. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) static cas_page_t *cas_page_dequeue(struct cas *cp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) struct list_head *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) int recover;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) spin_lock(&cp->rx_spare_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) if (list_empty(&cp->rx_spare_list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) /* try to do a quick recovery */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) spin_unlock(&cp->rx_spare_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) cas_spare_recover(cp, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) spin_lock(&cp->rx_spare_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) if (list_empty(&cp->rx_spare_list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) netif_err(cp, rx_err, cp->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) "no spare buffers available\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) spin_unlock(&cp->rx_spare_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) entry = cp->rx_spare_list.next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) list_del(entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) recover = ++cp->rx_spares_needed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) spin_unlock(&cp->rx_spare_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) /* trigger the timer to do the recovery */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) if ((recover & (RX_SPARE_RECOVER_VAL - 1)) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) #if 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) atomic_inc(&cp->reset_task_pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) atomic_inc(&cp->reset_task_pending_spare);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) schedule_work(&cp->reset_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) atomic_set(&cp->reset_task_pending, CAS_RESET_SPARE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) schedule_work(&cp->reset_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) return list_entry(entry, cas_page_t, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) static void cas_mif_poll(struct cas *cp, const int enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) u32 cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) cfg = readl(cp->regs + REG_MIF_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) cfg &= (MIF_CFG_MDIO_0 | MIF_CFG_MDIO_1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) if (cp->phy_type & CAS_PHY_MII_MDIO1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) cfg |= MIF_CFG_PHY_SELECT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) /* poll and interrupt on link status change. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) if (enable) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) cfg |= MIF_CFG_POLL_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) cfg |= CAS_BASE(MIF_CFG_POLL_REG, MII_BMSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) cfg |= CAS_BASE(MIF_CFG_POLL_PHY, cp->phy_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) writel((enable) ? ~(BMSR_LSTATUS | BMSR_ANEGCOMPLETE) : 0xFFFF,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) cp->regs + REG_MIF_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) writel(cfg, cp->regs + REG_MIF_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) /* Must be invoked under cp->lock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) static void cas_begin_auto_negotiation(struct cas *cp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) const struct ethtool_link_ksettings *ep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) u16 ctl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) #if 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) int lcntl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) int changed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) int oldstate = cp->lstate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) int link_was_not_down = !(oldstate == link_down);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) /* Setup link parameters */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) if (!ep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) goto start_aneg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) lcntl = cp->link_cntl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) if (ep->base.autoneg == AUTONEG_ENABLE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) cp->link_cntl = BMCR_ANENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) u32 speed = ep->base.speed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) cp->link_cntl = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) if (speed == SPEED_100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) cp->link_cntl |= BMCR_SPEED100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) else if (speed == SPEED_1000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) cp->link_cntl |= CAS_BMCR_SPEED1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) if (ep->base.duplex == DUPLEX_FULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) cp->link_cntl |= BMCR_FULLDPLX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) #if 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) changed = (lcntl != cp->link_cntl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) start_aneg:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) if (cp->lstate == link_up) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) netdev_info(cp->dev, "PCS link down\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) if (changed) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) netdev_info(cp->dev, "link configuration changed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) cp->lstate = link_down;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) cp->link_transition = LINK_TRANSITION_LINK_DOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) if (!cp->hw_running)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) #if 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) * WTZ: If the old state was link_up, we turn off the carrier
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) * to replicate everything we do elsewhere on a link-down
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) * event when we were already in a link-up state..
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) if (oldstate == link_up)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) netif_carrier_off(cp->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) if (changed && link_was_not_down) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) * WTZ: This branch will simply schedule a full reset after
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) * we explicitly changed link modes in an ioctl. See if this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) * fixes the link-problems we were having for forced mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) atomic_inc(&cp->reset_task_pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) atomic_inc(&cp->reset_task_pending_all);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) schedule_work(&cp->reset_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) cp->timer_ticks = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) if (cp->phy_type & CAS_PHY_SERDES) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) u32 val = readl(cp->regs + REG_PCS_MII_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) if (cp->link_cntl & BMCR_ANENABLE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) val |= (PCS_MII_RESTART_AUTONEG | PCS_MII_AUTONEG_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) cp->lstate = link_aneg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) if (cp->link_cntl & BMCR_FULLDPLX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) val |= PCS_MII_CTRL_DUPLEX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) val &= ~PCS_MII_AUTONEG_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) cp->lstate = link_force_ok;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) writel(val, cp->regs + REG_PCS_MII_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) cas_mif_poll(cp, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) ctl = cas_phy_read(cp, MII_BMCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) ctl &= ~(BMCR_FULLDPLX | BMCR_SPEED100 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) CAS_BMCR_SPEED1000 | BMCR_ANENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) ctl |= cp->link_cntl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) if (ctl & BMCR_ANENABLE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) ctl |= BMCR_ANRESTART;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) cp->lstate = link_aneg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) cp->lstate = link_force_ok;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) cas_phy_write(cp, MII_BMCR, ctl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) cas_mif_poll(cp, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) cp->timer_ticks = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) /* Must be invoked under cp->lock. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) static int cas_reset_mii_phy(struct cas *cp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) int limit = STOP_TRIES_PHY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) u16 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) cas_phy_write(cp, MII_BMCR, BMCR_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) udelay(100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) while (--limit) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) val = cas_phy_read(cp, MII_BMCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) if ((val & BMCR_RESET) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) udelay(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) return limit <= 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) static void cas_saturn_firmware_init(struct cas *cp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) const struct firmware *fw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) const char fw_name[] = "sun/cassini.bin";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) if (PHY_NS_DP83065 != cp->phy_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) err = request_firmware(&fw, fw_name, &cp->pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) pr_err("Failed to load firmware \"%s\"\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) fw_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) if (fw->size < 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) pr_err("bogus length %zu in \"%s\"\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) fw->size, fw_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) cp->fw_load_addr= fw->data[1] << 8 | fw->data[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) cp->fw_size = fw->size - 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) cp->fw_data = vmalloc(cp->fw_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) if (!cp->fw_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) memcpy(cp->fw_data, &fw->data[2], cp->fw_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) release_firmware(fw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) static void cas_saturn_firmware_load(struct cas *cp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) if (!cp->fw_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) cas_phy_powerdown(cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) /* expanded memory access mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) cas_phy_write(cp, DP83065_MII_MEM, 0x0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) /* pointer configuration for new firmware */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) cas_phy_write(cp, DP83065_MII_REGE, 0x8ff9);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) cas_phy_write(cp, DP83065_MII_REGD, 0xbd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) cas_phy_write(cp, DP83065_MII_REGE, 0x8ffa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) cas_phy_write(cp, DP83065_MII_REGD, 0x82);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) cas_phy_write(cp, DP83065_MII_REGE, 0x8ffb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) cas_phy_write(cp, DP83065_MII_REGD, 0x0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) cas_phy_write(cp, DP83065_MII_REGE, 0x8ffc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) cas_phy_write(cp, DP83065_MII_REGD, 0x39);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) /* download new firmware */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) cas_phy_write(cp, DP83065_MII_MEM, 0x1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) cas_phy_write(cp, DP83065_MII_REGE, cp->fw_load_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) for (i = 0; i < cp->fw_size; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) cas_phy_write(cp, DP83065_MII_REGD, cp->fw_data[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) /* enable firmware */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) cas_phy_write(cp, DP83065_MII_REGE, 0x8ff8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) cas_phy_write(cp, DP83065_MII_REGD, 0x1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) /* phy initialization */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) static void cas_phy_init(struct cas *cp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) u16 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) /* if we're in MII/GMII mode, set up phy */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) if (CAS_PHY_MII(cp->phy_type)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) writel(PCS_DATAPATH_MODE_MII,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) cp->regs + REG_PCS_DATAPATH_MODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) cas_mif_poll(cp, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) cas_reset_mii_phy(cp); /* take out of isolate mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) if (PHY_LUCENT_B0 == cp->phy_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) /* workaround link up/down issue with lucent */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) cas_phy_write(cp, LUCENT_MII_REG, 0x8000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) cas_phy_write(cp, MII_BMCR, 0x00f1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) cas_phy_write(cp, LUCENT_MII_REG, 0x0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) } else if (PHY_BROADCOM_B0 == (cp->phy_id & 0xFFFFFFFC)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) /* workarounds for broadcom phy */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) cas_phy_write(cp, BROADCOM_MII_REG8, 0x0C20);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) cas_phy_write(cp, BROADCOM_MII_REG7, 0x0012);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) cas_phy_write(cp, BROADCOM_MII_REG5, 0x1804);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) cas_phy_write(cp, BROADCOM_MII_REG7, 0x0013);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) cas_phy_write(cp, BROADCOM_MII_REG5, 0x1204);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) cas_phy_write(cp, BROADCOM_MII_REG7, 0x8006);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) cas_phy_write(cp, BROADCOM_MII_REG5, 0x0132);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) cas_phy_write(cp, BROADCOM_MII_REG7, 0x8006);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) cas_phy_write(cp, BROADCOM_MII_REG5, 0x0232);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) cas_phy_write(cp, BROADCOM_MII_REG7, 0x201F);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) cas_phy_write(cp, BROADCOM_MII_REG5, 0x0A20);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) } else if (PHY_BROADCOM_5411 == cp->phy_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) val = cas_phy_read(cp, BROADCOM_MII_REG4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) val = cas_phy_read(cp, BROADCOM_MII_REG4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) if (val & 0x0080) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) /* link workaround */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) cas_phy_write(cp, BROADCOM_MII_REG4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) val & ~0x0080);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) } else if (cp->cas_flags & CAS_FLAG_SATURN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) writel((cp->phy_type & CAS_PHY_MII_MDIO0) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) SATURN_PCFG_FSI : 0x0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) cp->regs + REG_SATURN_PCFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) /* load firmware to address 10Mbps auto-negotiation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) * issue. NOTE: this will need to be changed if the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) * default firmware gets fixed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) if (PHY_NS_DP83065 == cp->phy_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) cas_saturn_firmware_load(cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) cas_phy_powerup(cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) /* advertise capabilities */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) val = cas_phy_read(cp, MII_BMCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) val &= ~BMCR_ANENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) cas_phy_write(cp, MII_BMCR, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) udelay(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) cas_phy_write(cp, MII_ADVERTISE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) cas_phy_read(cp, MII_ADVERTISE) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) (ADVERTISE_10HALF | ADVERTISE_10FULL |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) ADVERTISE_100HALF | ADVERTISE_100FULL |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) CAS_ADVERTISE_PAUSE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) CAS_ADVERTISE_ASYM_PAUSE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) /* make sure that we don't advertise half
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) * duplex to avoid a chip issue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) val = cas_phy_read(cp, CAS_MII_1000_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) val &= ~CAS_ADVERTISE_1000HALF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) val |= CAS_ADVERTISE_1000FULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) cas_phy_write(cp, CAS_MII_1000_CTRL, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) /* reset pcs for serdes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) int limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) writel(PCS_DATAPATH_MODE_SERDES,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) cp->regs + REG_PCS_DATAPATH_MODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) /* enable serdes pins on saturn */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) if (cp->cas_flags & CAS_FLAG_SATURN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) writel(0, cp->regs + REG_SATURN_PCFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) /* Reset PCS unit. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) val = readl(cp->regs + REG_PCS_MII_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) val |= PCS_MII_RESET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) writel(val, cp->regs + REG_PCS_MII_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) limit = STOP_TRIES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) while (--limit > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) udelay(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) if ((readl(cp->regs + REG_PCS_MII_CTRL) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) PCS_MII_RESET) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) if (limit <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) netdev_warn(cp->dev, "PCS reset bit would not clear [%08x]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) readl(cp->regs + REG_PCS_STATE_MACHINE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) /* Make sure PCS is disabled while changing advertisement
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) * configuration.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) writel(0x0, cp->regs + REG_PCS_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) /* Advertise all capabilities except half-duplex. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) val = readl(cp->regs + REG_PCS_MII_ADVERT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) val &= ~PCS_MII_ADVERT_HD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) val |= (PCS_MII_ADVERT_FD | PCS_MII_ADVERT_SYM_PAUSE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) PCS_MII_ADVERT_ASYM_PAUSE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) writel(val, cp->regs + REG_PCS_MII_ADVERT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) /* enable PCS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) writel(PCS_CFG_EN, cp->regs + REG_PCS_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) /* pcs workaround: enable sync detect */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) writel(PCS_SERDES_CTRL_SYNCD_EN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) cp->regs + REG_PCS_SERDES_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) static int cas_pcs_link_check(struct cas *cp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) u32 stat, state_machine;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) int retval = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) /* The link status bit latches on zero, so you must
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) * read it twice in such a case to see a transition
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) * to the link being up.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) stat = readl(cp->regs + REG_PCS_MII_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) if ((stat & PCS_MII_STATUS_LINK_STATUS) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) stat = readl(cp->regs + REG_PCS_MII_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) /* The remote-fault indication is only valid
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) * when autoneg has completed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) if ((stat & (PCS_MII_STATUS_AUTONEG_COMP |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) PCS_MII_STATUS_REMOTE_FAULT)) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) (PCS_MII_STATUS_AUTONEG_COMP | PCS_MII_STATUS_REMOTE_FAULT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) netif_info(cp, link, cp->dev, "PCS RemoteFault\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) /* work around link detection issue by querying the PCS state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) * machine directly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) state_machine = readl(cp->regs + REG_PCS_STATE_MACHINE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) if ((state_machine & PCS_SM_LINK_STATE_MASK) != SM_LINK_STATE_UP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) stat &= ~PCS_MII_STATUS_LINK_STATUS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) } else if (state_machine & PCS_SM_WORD_SYNC_STATE_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) stat |= PCS_MII_STATUS_LINK_STATUS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) if (stat & PCS_MII_STATUS_LINK_STATUS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) if (cp->lstate != link_up) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) if (cp->opened) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) cp->lstate = link_up;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) cp->link_transition = LINK_TRANSITION_LINK_UP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) cas_set_link_modes(cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) netif_carrier_on(cp->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) } else if (cp->lstate == link_up) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) cp->lstate = link_down;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) if (link_transition_timeout != 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) cp->link_transition != LINK_TRANSITION_REQUESTED_RESET &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) !cp->link_transition_jiffies_valid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) * force a reset, as a workaround for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) * link-failure problem. May want to move this to a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) * point a bit earlier in the sequence. If we had
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) * generated a reset a short time ago, we'll wait for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) * the link timer to check the status until a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) * timer expires (link_transistion_jiffies_valid is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) * true when the timer is running.) Instead of using
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) * a system timer, we just do a check whenever the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) * link timer is running - this clears the flag after
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) * a suitable delay.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) retval = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) cp->link_transition = LINK_TRANSITION_REQUESTED_RESET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) cp->link_transition_jiffies = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) cp->link_transition_jiffies_valid = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) cp->link_transition = LINK_TRANSITION_ON_FAILURE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) netif_carrier_off(cp->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) if (cp->opened)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) netif_info(cp, link, cp->dev, "PCS link down\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) /* Cassini only: if you force a mode, there can be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) * sync problems on link down. to fix that, the following
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) * things need to be checked:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) * 1) read serialink state register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) * 2) read pcs status register to verify link down.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) * 3) if link down and serial link == 0x03, then you need
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) * to global reset the chip.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) if ((cp->cas_flags & CAS_FLAG_REG_PLUS) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) /* should check to see if we're in a forced mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) stat = readl(cp->regs + REG_PCS_SERDES_STATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) if (stat == 0x03)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) } else if (cp->lstate == link_down) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) if (link_transition_timeout != 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) cp->link_transition != LINK_TRANSITION_REQUESTED_RESET &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) !cp->link_transition_jiffies_valid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) /* force a reset, as a workaround for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) * link-failure problem. May want to move
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) * this to a point a bit earlier in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) * sequence.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) retval = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) cp->link_transition = LINK_TRANSITION_REQUESTED_RESET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) cp->link_transition_jiffies = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) cp->link_transition_jiffies_valid = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) cp->link_transition = LINK_TRANSITION_STILL_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) static int cas_pcs_interrupt(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) struct cas *cp, u32 status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) u32 stat = readl(cp->regs + REG_PCS_INTR_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) if ((stat & PCS_INTR_STATUS_LINK_CHANGE) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) return cas_pcs_link_check(cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) static int cas_txmac_interrupt(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) struct cas *cp, u32 status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) u32 txmac_stat = readl(cp->regs + REG_MAC_TX_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) if (!txmac_stat)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) netif_printk(cp, intr, KERN_DEBUG, cp->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) "txmac interrupt, txmac_stat: 0x%x\n", txmac_stat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) /* Defer timer expiration is quite normal,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) * don't even log the event.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) if ((txmac_stat & MAC_TX_DEFER_TIMER) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) !(txmac_stat & ~MAC_TX_DEFER_TIMER))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) spin_lock(&cp->stat_lock[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) if (txmac_stat & MAC_TX_UNDERRUN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) netdev_err(dev, "TX MAC xmit underrun\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) cp->net_stats[0].tx_fifo_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) if (txmac_stat & MAC_TX_MAX_PACKET_ERR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) netdev_err(dev, "TX MAC max packet size error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) cp->net_stats[0].tx_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) /* The rest are all cases of one of the 16-bit TX
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) * counters expiring.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) if (txmac_stat & MAC_TX_COLL_NORMAL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) cp->net_stats[0].collisions += 0x10000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) if (txmac_stat & MAC_TX_COLL_EXCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) cp->net_stats[0].tx_aborted_errors += 0x10000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) cp->net_stats[0].collisions += 0x10000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) if (txmac_stat & MAC_TX_COLL_LATE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) cp->net_stats[0].tx_aborted_errors += 0x10000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) cp->net_stats[0].collisions += 0x10000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) spin_unlock(&cp->stat_lock[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) /* We do not keep track of MAC_TX_COLL_FIRST and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) * MAC_TX_PEAK_ATTEMPTS events.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) static void cas_load_firmware(struct cas *cp, cas_hp_inst_t *firmware)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) cas_hp_inst_t *inst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) while ((inst = firmware) && inst->note) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) writel(i, cp->regs + REG_HP_INSTR_RAM_ADDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) val = CAS_BASE(HP_INSTR_RAM_HI_VAL, inst->val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) val |= CAS_BASE(HP_INSTR_RAM_HI_MASK, inst->mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_HI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) val = CAS_BASE(HP_INSTR_RAM_MID_OUTARG, inst->outarg >> 10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) val |= CAS_BASE(HP_INSTR_RAM_MID_OUTOP, inst->outop);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) val |= CAS_BASE(HP_INSTR_RAM_MID_FNEXT, inst->fnext);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) val |= CAS_BASE(HP_INSTR_RAM_MID_FOFF, inst->foff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) val |= CAS_BASE(HP_INSTR_RAM_MID_SNEXT, inst->snext);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) val |= CAS_BASE(HP_INSTR_RAM_MID_SOFF, inst->soff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) val |= CAS_BASE(HP_INSTR_RAM_MID_OP, inst->op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_MID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) val = CAS_BASE(HP_INSTR_RAM_LOW_OUTMASK, inst->outmask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTSHIFT, inst->outshift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTEN, inst->outenab);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTARG, inst->outarg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_LOW);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) ++firmware;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) ++i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) static void cas_init_rx_dma(struct cas *cp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) u64 desc_dma = cp->block_dvma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) int i, size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) /* rx free descriptors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) val = CAS_BASE(RX_CFG_SWIVEL, RX_SWIVEL_OFF_VAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) val |= CAS_BASE(RX_CFG_DESC_RING, RX_DESC_RINGN_INDEX(0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) val |= CAS_BASE(RX_CFG_COMP_RING, RX_COMP_RINGN_INDEX(0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) if ((N_RX_DESC_RINGS > 1) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) (cp->cas_flags & CAS_FLAG_REG_PLUS)) /* do desc 2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) val |= CAS_BASE(RX_CFG_DESC_RING1, RX_DESC_RINGN_INDEX(1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) writel(val, cp->regs + REG_RX_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) val = (unsigned long) cp->init_rxds[0] -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) (unsigned long) cp->init_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) writel((desc_dma + val) >> 32, cp->regs + REG_RX_DB_HI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) writel((desc_dma + val) & 0xffffffff, cp->regs + REG_RX_DB_LOW);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) writel(RX_DESC_RINGN_SIZE(0) - 4, cp->regs + REG_RX_KICK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) /* rx desc 2 is for IPSEC packets. however,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) * we don't it that for that purpose.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) val = (unsigned long) cp->init_rxds[1] -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) (unsigned long) cp->init_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) writel((desc_dma + val) >> 32, cp->regs + REG_PLUS_RX_DB1_HI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) writel((desc_dma + val) & 0xffffffff, cp->regs +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) REG_PLUS_RX_DB1_LOW);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) writel(RX_DESC_RINGN_SIZE(1) - 4, cp->regs +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) REG_PLUS_RX_KICK1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) /* rx completion registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) val = (unsigned long) cp->init_rxcs[0] -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) (unsigned long) cp->init_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) writel((desc_dma + val) >> 32, cp->regs + REG_RX_CB_HI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) writel((desc_dma + val) & 0xffffffff, cp->regs + REG_RX_CB_LOW);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) /* rx comp 2-4 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) for (i = 1; i < MAX_RX_COMP_RINGS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) val = (unsigned long) cp->init_rxcs[i] -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) (unsigned long) cp->init_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) writel((desc_dma + val) >> 32, cp->regs +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) REG_PLUS_RX_CBN_HI(i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) writel((desc_dma + val) & 0xffffffff, cp->regs +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) REG_PLUS_RX_CBN_LOW(i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) /* read selective clear regs to prevent spurious interrupts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) * on reset because complete == kick.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) * selective clear set up to prevent interrupts on resets
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) readl(cp->regs + REG_INTR_STATUS_ALIAS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) writel(INTR_RX_DONE | INTR_RX_BUF_UNAVAIL, cp->regs + REG_ALIAS_CLEAR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) for (i = 1; i < N_RX_COMP_RINGS; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) readl(cp->regs + REG_PLUS_INTRN_STATUS_ALIAS(i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) /* 2 is different from 3 and 4 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) if (N_RX_COMP_RINGS > 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) writel(INTR_RX_DONE_ALT | INTR_RX_BUF_UNAVAIL_1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) cp->regs + REG_PLUS_ALIASN_CLEAR(1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) for (i = 2; i < N_RX_COMP_RINGS; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) writel(INTR_RX_DONE_ALT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) cp->regs + REG_PLUS_ALIASN_CLEAR(i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) /* set up pause thresholds */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) val = CAS_BASE(RX_PAUSE_THRESH_OFF,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) cp->rx_pause_off / RX_PAUSE_THRESH_QUANTUM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) val |= CAS_BASE(RX_PAUSE_THRESH_ON,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) cp->rx_pause_on / RX_PAUSE_THRESH_QUANTUM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) writel(val, cp->regs + REG_RX_PAUSE_THRESH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) /* zero out dma reassembly buffers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) for (i = 0; i < 64; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) writel(i, cp->regs + REG_RX_TABLE_ADDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) writel(0x0, cp->regs + REG_RX_TABLE_DATA_LOW);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) writel(0x0, cp->regs + REG_RX_TABLE_DATA_MID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) writel(0x0, cp->regs + REG_RX_TABLE_DATA_HI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) /* make sure address register is 0 for normal operation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) writel(0x0, cp->regs + REG_RX_CTRL_FIFO_ADDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) writel(0x0, cp->regs + REG_RX_IPP_FIFO_ADDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) /* interrupt mitigation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) #ifdef USE_RX_BLANK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) val = CAS_BASE(RX_BLANK_INTR_TIME, RX_BLANK_INTR_TIME_VAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) val |= CAS_BASE(RX_BLANK_INTR_PKT, RX_BLANK_INTR_PKT_VAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) writel(val, cp->regs + REG_RX_BLANK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) writel(0x0, cp->regs + REG_RX_BLANK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) /* interrupt generation as a function of low water marks for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) * free desc and completion entries. these are used to trigger
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) * housekeeping for rx descs. we don't use the free interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) * as it's not very useful
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) /* val = CAS_BASE(RX_AE_THRESH_FREE, RX_AE_FREEN_VAL(0)); */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) val = CAS_BASE(RX_AE_THRESH_COMP, RX_AE_COMP_VAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) writel(val, cp->regs + REG_RX_AE_THRESH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) val = CAS_BASE(RX_AE1_THRESH_FREE, RX_AE_FREEN_VAL(1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) writel(val, cp->regs + REG_PLUS_RX_AE1_THRESH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) /* Random early detect registers. useful for congestion avoidance.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) * this should be tunable.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) writel(0x0, cp->regs + REG_RX_RED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) /* receive page sizes. default == 2K (0x800) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) val = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) if (cp->page_size == 0x1000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) val = 0x1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) else if (cp->page_size == 0x2000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) val = 0x2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) else if (cp->page_size == 0x4000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) val = 0x3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) /* round mtu + offset. constrain to page size. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) size = cp->dev->mtu + 64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) if (size > cp->page_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) size = cp->page_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) if (size <= 0x400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) i = 0x0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) else if (size <= 0x800)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) i = 0x1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) else if (size <= 0x1000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) i = 0x2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) i = 0x3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) cp->mtu_stride = 1 << (i + 10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) val = CAS_BASE(RX_PAGE_SIZE, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) val |= CAS_BASE(RX_PAGE_SIZE_MTU_STRIDE, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) val |= CAS_BASE(RX_PAGE_SIZE_MTU_COUNT, cp->page_size >> (i + 10));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) val |= CAS_BASE(RX_PAGE_SIZE_MTU_OFF, 0x1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) writel(val, cp->regs + REG_RX_PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) /* enable the header parser if desired */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) if (CAS_HP_FIRMWARE == cas_prog_null)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) val = CAS_BASE(HP_CFG_NUM_CPU, CAS_NCPUS > 63 ? 0 : CAS_NCPUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) val |= HP_CFG_PARSE_EN | HP_CFG_SYN_INC_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) val |= CAS_BASE(HP_CFG_TCP_THRESH, HP_TCP_THRESH_VAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) writel(val, cp->regs + REG_HP_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336)
static inline void cas_rxc_init(struct cas_rx_comp *rxc)
{
	/* Reset one RX completion-ring entry: clear every word, then
	 * stamp word4 with RX_COMP4_ZERO (presumably a sentinel that
	 * distinguishes a not-yet-written entry from a hardware-filled
	 * one -- confirm against the completion-ring consumer).
	 */
	memset(rxc, 0, sizeof(*rxc));
	rxc->word4 = cpu_to_le64(RX_COMP4_ZERO);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) /* NOTE: we use the ENC RX DESC ring for spares. the rx_page[0,1]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) * flipping is protected by the fact that the chip will not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) * hand back the same page index while it's being processed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) static inline cas_page_t *cas_page_spare(struct cas *cp, const int index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) cas_page_t *page = cp->rx_pages[1][index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) cas_page_t *new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) if (page_count(page->buffer) == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) return page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) new = cas_page_dequeue(cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) if (new) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) spin_lock(&cp->rx_inuse_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) list_add(&page->list, &cp->rx_inuse_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) spin_unlock(&cp->rx_inuse_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) return new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) /* this needs to be changed if we actually use the ENC RX DESC ring */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) static cas_page_t *cas_page_swap(struct cas *cp, const int ring,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) const int index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) cas_page_t **page0 = cp->rx_pages[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) cas_page_t **page1 = cp->rx_pages[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) /* swap if buffer is in use */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) if (page_count(page0[index]->buffer) > 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) cas_page_t *new = cas_page_spare(cp, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) if (new) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) page1[index] = page0[index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) page0[index] = new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) RX_USED_SET(page0[index], 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) return page0[index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) static void cas_clean_rxds(struct cas *cp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) /* only clean ring 0 as ring 1 is used for spare buffers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) struct cas_rx_desc *rxd = cp->init_rxds[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) int i, size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) /* release all rx flows */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) for (i = 0; i < N_RX_FLOWS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) while ((skb = __skb_dequeue(&cp->rx_flows[i]))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) cas_skb_release(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) /* initialize descriptors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) size = RX_DESC_RINGN_SIZE(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) for (i = 0; i < size; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) cas_page_t *page = cas_page_swap(cp, 0, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) rxd[i].buffer = cpu_to_le64(page->dma_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) rxd[i].index = cpu_to_le64(CAS_BASE(RX_INDEX_NUM, i) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) CAS_BASE(RX_INDEX_RING, 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) cp->rx_old[0] = RX_DESC_RINGN_SIZE(0) - 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) cp->rx_last[0] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) cp->cas_flags &= ~CAS_FLAG_RXD_POST(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) static void cas_clean_rxcs(struct cas *cp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) int i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) /* take ownership of rx comp descriptors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) memset(cp->rx_cur, 0, sizeof(*cp->rx_cur)*N_RX_COMP_RINGS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) memset(cp->rx_new, 0, sizeof(*cp->rx_new)*N_RX_COMP_RINGS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) for (i = 0; i < N_RX_COMP_RINGS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) struct cas_rx_comp *rxc = cp->init_rxcs[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) for (j = 0; j < RX_COMP_RINGN_SIZE(i); j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) cas_rxc_init(rxc + j);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425)
#if 0
/* When we get a RX fifo overflow, the RX unit is probably hung
 * so we do the following.
 *
 * If any part of the reset goes wrong, we return 1 and that causes the
 * whole chip to be reset.
 *
 * NOTE(review): compiled out via "#if 0" -- dead code retained for
 * reference only.
 */
static int cas_rxmac_reset(struct cas *cp)
{
	struct net_device *dev = cp->dev;
	int limit;
	u32 val;

	/* First, reset MAC RX. */
	writel(cp->mac_rx_cfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
	/* poll until the enable bit reads back clear, 10us per try */
	for (limit = 0; limit < STOP_TRIES; limit++) {
		if (!(readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_EN))
			break;
		udelay(10);
	}
	if (limit == STOP_TRIES) {
		netdev_err(dev, "RX MAC will not disable, resetting whole chip\n");
		return 1;
	}

	/* Second, disable RX DMA. */
	writel(0, cp->regs + REG_RX_CFG);
	for (limit = 0; limit < STOP_TRIES; limit++) {
		if (!(readl(cp->regs + REG_RX_CFG) & RX_CFG_DMA_EN))
			break;
		udelay(10);
	}
	if (limit == STOP_TRIES) {
		netdev_err(dev, "RX DMA will not disable, resetting whole chip\n");
		return 1;
	}

	/* settle before issuing the SW reset -- exact requirement not
	 * documented here; presumably lets in-flight DMA drain
	 */
	mdelay(5);

	/* Execute RX reset command. */
	writel(SW_RESET_RX, cp->regs + REG_SW_RESET);
	for (limit = 0; limit < STOP_TRIES; limit++) {
		/* the chip clears SW_RESET_RX when the reset completes */
		if (!(readl(cp->regs + REG_SW_RESET) & SW_RESET_RX))
			break;
		udelay(10);
	}
	if (limit == STOP_TRIES) {
		netdev_err(dev, "RX reset command will not execute, resetting whole chip\n");
		return 1;
	}

	/* reset driver rx state */
	cas_clean_rxds(cp);
	cas_clean_rxcs(cp);

	/* Now, reprogram the rest of RX unit. */
	cas_init_rx_dma(cp);

	/* re-enable RX DMA, unmask frame-received, re-enable MAC RX */
	val = readl(cp->regs + REG_RX_CFG);
	writel(val | RX_CFG_DMA_EN, cp->regs + REG_RX_CFG);
	writel(MAC_RX_FRAME_RECV, cp->regs + REG_MAC_RX_MASK);
	val = readl(cp->regs + REG_MAC_RX_CFG);
	writel(val | MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
	return 0;
}
#endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) static int cas_rxmac_interrupt(struct net_device *dev, struct cas *cp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) u32 status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) u32 stat = readl(cp->regs + REG_MAC_RX_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) if (!stat)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) netif_dbg(cp, intr, cp->dev, "rxmac interrupt, stat: 0x%x\n", stat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) /* these are all rollovers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) spin_lock(&cp->stat_lock[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) if (stat & MAC_RX_ALIGN_ERR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) cp->net_stats[0].rx_frame_errors += 0x10000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) if (stat & MAC_RX_CRC_ERR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) cp->net_stats[0].rx_crc_errors += 0x10000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) if (stat & MAC_RX_LEN_ERR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) cp->net_stats[0].rx_length_errors += 0x10000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) if (stat & MAC_RX_OVERFLOW) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) cp->net_stats[0].rx_over_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) cp->net_stats[0].rx_fifo_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) /* We do not track MAC_RX_FRAME_COUNT and MAC_RX_VIOL_ERR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) * events.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) spin_unlock(&cp->stat_lock[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) static int cas_mac_interrupt(struct net_device *dev, struct cas *cp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) u32 status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) u32 stat = readl(cp->regs + REG_MAC_CTRL_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) if (!stat)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) netif_printk(cp, intr, KERN_DEBUG, cp->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) "mac interrupt, stat: 0x%x\n", stat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) /* This interrupt is just for pause frame and pause
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) * tracking. It is useful for diagnostics and debug
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) * but probably by default we will mask these events.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) if (stat & MAC_CTRL_PAUSE_STATE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) cp->pause_entered++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) if (stat & MAC_CTRL_PAUSE_RECEIVED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) cp->pause_last_time_recvd = (stat >> 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) /* Must be invoked under cp->lock. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) static inline int cas_mdio_link_not_up(struct cas *cp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) u16 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) switch (cp->lstate) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) case link_force_ret:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) netif_info(cp, link, cp->dev, "Autoneg failed again, keeping forced mode\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) cas_phy_write(cp, MII_BMCR, cp->link_fcntl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) cp->timer_ticks = 5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) cp->lstate = link_force_ok;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) case link_aneg:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) val = cas_phy_read(cp, MII_BMCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) /* Try forced modes. we try things in the following order:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) * 1000 full -> 100 full/half -> 10 half
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) val &= ~(BMCR_ANRESTART | BMCR_ANENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) val |= BMCR_FULLDPLX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) val |= (cp->cas_flags & CAS_FLAG_1000MB_CAP) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) CAS_BMCR_SPEED1000 : BMCR_SPEED100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) cas_phy_write(cp, MII_BMCR, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) cp->timer_ticks = 5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) cp->lstate = link_force_try;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) case link_force_try:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) /* Downgrade from 1000 to 100 to 10 Mbps if necessary. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) val = cas_phy_read(cp, MII_BMCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) cp->timer_ticks = 5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) if (val & CAS_BMCR_SPEED1000) { /* gigabit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) val &= ~CAS_BMCR_SPEED1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) val |= (BMCR_SPEED100 | BMCR_FULLDPLX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) cas_phy_write(cp, MII_BMCR, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) if (val & BMCR_SPEED100) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) if (val & BMCR_FULLDPLX) /* fd failed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) val &= ~BMCR_FULLDPLX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) else { /* 100Mbps failed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) val &= ~BMCR_SPEED100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) cas_phy_write(cp, MII_BMCR, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) /* must be invoked with cp->lock held */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) static int cas_mii_link_check(struct cas *cp, const u16 bmsr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) int restart;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) if (bmsr & BMSR_LSTATUS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) /* Ok, here we got a link. If we had it due to a forced
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) * fallback, and we were configured for autoneg, we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) * retry a short autoneg pass. If you know your hub is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) * broken, use ethtool ;)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) if ((cp->lstate == link_force_try) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) (cp->link_cntl & BMCR_ANENABLE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) cp->lstate = link_force_ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) cas_mif_poll(cp, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) cp->link_fcntl = cas_phy_read(cp, MII_BMCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) cp->timer_ticks = 5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) if (cp->opened)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) netif_info(cp, link, cp->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) "Got link after fallback, retrying autoneg once...\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) cas_phy_write(cp, MII_BMCR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) cp->link_fcntl | BMCR_ANENABLE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) BMCR_ANRESTART);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) cas_mif_poll(cp, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) } else if (cp->lstate != link_up) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) cp->lstate = link_up;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) cp->link_transition = LINK_TRANSITION_LINK_UP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) if (cp->opened) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) cas_set_link_modes(cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) netif_carrier_on(cp->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) /* link not up. if the link was previously up, we restart the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) * whole process
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) restart = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) if (cp->lstate == link_up) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) cp->lstate = link_down;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) cp->link_transition = LINK_TRANSITION_LINK_DOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) netif_carrier_off(cp->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) if (cp->opened)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) netif_info(cp, link, cp->dev, "Link down\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) restart = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) } else if (++cp->timer_ticks > 10)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) cas_mdio_link_not_up(cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) return restart;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) static int cas_mif_interrupt(struct net_device *dev, struct cas *cp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) u32 status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) u32 stat = readl(cp->regs + REG_MIF_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) u16 bmsr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) /* check for a link change */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) if (CAS_VAL(MIF_STATUS_POLL_STATUS, stat) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) bmsr = CAS_VAL(MIF_STATUS_POLL_DATA, stat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) return cas_mii_link_check(cp, bmsr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) static int cas_pci_interrupt(struct net_device *dev, struct cas *cp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) u32 status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) u32 stat = readl(cp->regs + REG_PCI_ERR_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) if (!stat)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) netdev_err(dev, "PCI error [%04x:%04x]",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) stat, readl(cp->regs + REG_BIM_DIAG));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) /* cassini+ has this reserved */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) if ((stat & PCI_ERR_BADACK) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) ((cp->cas_flags & CAS_FLAG_REG_PLUS) == 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) pr_cont(" <No ACK64# during ABS64 cycle>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) if (stat & PCI_ERR_DTRTO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) pr_cont(" <Delayed transaction timeout>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) if (stat & PCI_ERR_OTHER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) pr_cont(" <other>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) if (stat & PCI_ERR_BIM_DMA_WRITE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) pr_cont(" <BIM DMA 0 write req>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) if (stat & PCI_ERR_BIM_DMA_READ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) pr_cont(" <BIM DMA 0 read req>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) pr_cont("\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) if (stat & PCI_ERR_OTHER) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) int pci_errs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) /* Interrogate PCI config space for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) * true cause.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) pci_errs = pci_status_get_and_clear_errors(cp->pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) netdev_err(dev, "PCI status errors[%04x]\n", pci_errs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) if (pci_errs & PCI_STATUS_PARITY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) netdev_err(dev, "PCI parity error detected\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) if (pci_errs & PCI_STATUS_SIG_TARGET_ABORT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) netdev_err(dev, "PCI target abort\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) if (pci_errs & PCI_STATUS_REC_TARGET_ABORT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) netdev_err(dev, "PCI master acks target abort\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) if (pci_errs & PCI_STATUS_REC_MASTER_ABORT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) netdev_err(dev, "PCI master abort\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) if (pci_errs & PCI_STATUS_SIG_SYSTEM_ERROR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) netdev_err(dev, "PCI system error SERR#\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) if (pci_errs & PCI_STATUS_DETECTED_PARITY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) netdev_err(dev, "PCI parity error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) /* For all PCI errors, we should reset the chip. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) /* All non-normal interrupt conditions get serviced here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) * Returns non-zero if we should just exit the interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) * handler right now (ie. if we reset the card which invalidates
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) * all of the other original irq status bits).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) static int cas_abnormal_irq(struct net_device *dev, struct cas *cp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) u32 status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) if (status & INTR_RX_TAG_ERROR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) /* corrupt RX tag framing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) "corrupt rx tag framing\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) spin_lock(&cp->stat_lock[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) cp->net_stats[0].rx_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) spin_unlock(&cp->stat_lock[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) goto do_reset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) if (status & INTR_RX_LEN_MISMATCH) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) /* length mismatch. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) "length mismatch for rx frame\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) spin_lock(&cp->stat_lock[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) cp->net_stats[0].rx_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) spin_unlock(&cp->stat_lock[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) goto do_reset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) if (status & INTR_PCS_STATUS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) if (cas_pcs_interrupt(dev, cp, status))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) goto do_reset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) if (status & INTR_TX_MAC_STATUS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) if (cas_txmac_interrupt(dev, cp, status))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) goto do_reset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) if (status & INTR_RX_MAC_STATUS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) if (cas_rxmac_interrupt(dev, cp, status))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) goto do_reset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) if (status & INTR_MAC_CTRL_STATUS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) if (cas_mac_interrupt(dev, cp, status))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) goto do_reset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) if (status & INTR_MIF_STATUS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) if (cas_mif_interrupt(dev, cp, status))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) goto do_reset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) if (status & INTR_PCI_ERROR_STATUS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) if (cas_pci_interrupt(dev, cp, status))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) goto do_reset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) do_reset:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) #if 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) atomic_inc(&cp->reset_task_pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) atomic_inc(&cp->reset_task_pending_all);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) netdev_err(dev, "reset called in cas_abnormal_irq [0x%x]\n", status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) schedule_work(&cp->reset_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) netdev_err(dev, "reset called in cas_abnormal_irq\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) schedule_work(&cp->reset_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) /* NOTE: CAS_TABORT returns 1 or 2 so that it can be used when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) * determining whether to do a netif_stop/wakeup
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) #define CAS_TABORT(x) (((x)->cas_flags & CAS_FLAG_TARGET_ABORT) ? 2 : 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) #define CAS_ROUND_PAGE(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) static inline int cas_calc_tabort(struct cas *cp, const unsigned long addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) const int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) unsigned long off = addr + len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) if (CAS_TABORT(cp) == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) if ((CAS_ROUND_PAGE(off) - off) > TX_TARGET_ABORT_LEN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) return TX_TARGET_ABORT_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) static inline void cas_tx_ringN(struct cas *cp, int ring, int limit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) struct cas_tx_desc *txds;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) struct sk_buff **skbs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) struct net_device *dev = cp->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) int entry, count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) spin_lock(&cp->tx_lock[ring]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) txds = cp->init_txds[ring];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) skbs = cp->tx_skbs[ring];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) entry = cp->tx_old[ring];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) count = TX_BUFF_COUNT(ring, entry, limit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) while (entry != limit) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) struct sk_buff *skb = skbs[entry];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) dma_addr_t daddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) u32 dlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) int frag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) if (!skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) /* this should never occur */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) entry = TX_DESC_NEXT(ring, entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) /* however, we might get only a partial skb release. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) count -= skb_shinfo(skb)->nr_frags +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) + cp->tx_tiny_use[ring][entry].nbufs + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) if (count < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) netif_printk(cp, tx_done, KERN_DEBUG, cp->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) "tx[%d] done, slot %d\n", ring, entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) skbs[entry] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) cp->tx_tiny_use[ring][entry].nbufs = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) struct cas_tx_desc *txd = txds + entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) daddr = le64_to_cpu(txd->buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) dlen = CAS_VAL(TX_DESC_BUFLEN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) le64_to_cpu(txd->control));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) dma_unmap_page(&cp->pdev->dev, daddr, dlen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) entry = TX_DESC_NEXT(ring, entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) /* tiny buffer may follow */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) if (cp->tx_tiny_use[ring][entry].used) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) cp->tx_tiny_use[ring][entry].used = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) entry = TX_DESC_NEXT(ring, entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) spin_lock(&cp->stat_lock[ring]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) cp->net_stats[ring].tx_packets++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) cp->net_stats[ring].tx_bytes += skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) spin_unlock(&cp->stat_lock[ring]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) dev_consume_skb_irq(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) cp->tx_old[ring] = entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) /* this is wrong for multiple tx rings. the net device needs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) * multiple queues for this to do the right thing. we wait
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) * for 2*packets to be available when using tiny buffers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) if (netif_queue_stopped(dev) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) (TX_BUFFS_AVAIL(cp, ring) > CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) netif_wake_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) spin_unlock(&cp->tx_lock[ring]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) static void cas_tx(struct net_device *dev, struct cas *cp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) u32 status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) int limit, ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) #ifdef USE_TX_COMPWB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) u64 compwb = le64_to_cpu(cp->init_block->tx_compwb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) netif_printk(cp, intr, KERN_DEBUG, cp->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) "tx interrupt, status: 0x%x, %llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) status, (unsigned long long)compwb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) /* process all the rings */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) for (ring = 0; ring < N_TX_RINGS; ring++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) #ifdef USE_TX_COMPWB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) /* use the completion writeback registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) limit = (CAS_VAL(TX_COMPWB_MSB, compwb) << 8) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) CAS_VAL(TX_COMPWB_LSB, compwb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) compwb = TX_COMPWB_NEXT(compwb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) limit = readl(cp->regs + REG_TX_COMPN(ring));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) if (cp->tx_old[ring] != limit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) cas_tx_ringN(cp, ring, limit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) int entry, const u64 *words,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) struct sk_buff **skbref)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) int dlen, hlen, len, i, alloclen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) int off, swivel = RX_SWIVEL_OFF_VAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) struct cas_page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) void *addr, *crcaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) __sum16 csum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) char *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) hlen = CAS_VAL(RX_COMP2_HDR_SIZE, words[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) dlen = CAS_VAL(RX_COMP1_DATA_SIZE, words[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) len = hlen + dlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) if (RX_COPY_ALWAYS || (words[2] & RX_COMP3_SMALL_PKT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) alloclen = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) alloclen = max(hlen, RX_COPY_MIN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) skb = netdev_alloc_skb(cp->dev, alloclen + swivel + cp->crc_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) if (skb == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) *skbref = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) skb_reserve(skb, swivel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) p = skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) addr = crcaddr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) if (hlen) { /* always copy header pages */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) i = CAS_VAL(RX_COMP2_HDR_INDEX, words[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) off = CAS_VAL(RX_COMP2_HDR_OFF, words[1]) * 0x100 +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) swivel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) i = hlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) if (!dlen) /* attach FCS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) i += cp->crc_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) dma_sync_single_for_cpu(&cp->pdev->dev, page->dma_addr + off,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) i, DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) addr = cas_page_map(page->buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) memcpy(p, addr + off, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) dma_sync_single_for_device(&cp->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) page->dma_addr + off, i,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) cas_page_unmap(addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) RX_USED_ADD(page, 0x100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) p += hlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) swivel = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) if (alloclen < (hlen + dlen)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) skb_frag_t *frag = skb_shinfo(skb)->frags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) /* normal or jumbo packets. we use frags */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) off = CAS_VAL(RX_COMP1_DATA_OFF, words[0]) + swivel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) hlen = min(cp->page_size - off, dlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) if (hlen < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) "rx page overflow: %d\n", hlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) dev_kfree_skb_irq(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) i = hlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) if (i == dlen) /* attach FCS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) i += cp->crc_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) dma_sync_single_for_cpu(&cp->pdev->dev, page->dma_addr + off,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) i, DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) /* make sure we always copy a header */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) swivel = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) if (p == (char *) skb->data) { /* not split */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) addr = cas_page_map(page->buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) memcpy(p, addr + off, RX_COPY_MIN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) dma_sync_single_for_device(&cp->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) page->dma_addr + off, i,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) cas_page_unmap(addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) off += RX_COPY_MIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) swivel = RX_COPY_MIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) RX_USED_ADD(page, cp->mtu_stride);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) RX_USED_ADD(page, hlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) skb_put(skb, alloclen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) skb_shinfo(skb)->nr_frags++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) skb->data_len += hlen - swivel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) skb->truesize += hlen - swivel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) skb->len += hlen - swivel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) __skb_frag_set_page(frag, page->buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) __skb_frag_ref(frag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) skb_frag_off_set(frag, off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) skb_frag_size_set(frag, hlen - swivel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) /* any more data? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) if ((words[0] & RX_COMP1_SPLIT_PKT) && ((dlen -= hlen) > 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) hlen = dlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) off = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) dma_sync_single_for_cpu(&cp->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) page->dma_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) hlen + cp->crc_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) dma_sync_single_for_device(&cp->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) page->dma_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) hlen + cp->crc_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) skb_shinfo(skb)->nr_frags++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) skb->data_len += hlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) skb->len += hlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) frag++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) __skb_frag_set_page(frag, page->buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) __skb_frag_ref(frag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) skb_frag_off_set(frag, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) skb_frag_size_set(frag, hlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) RX_USED_ADD(page, hlen + cp->crc_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) if (cp->crc_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) addr = cas_page_map(page->buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) crcaddr = addr + off + hlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) /* copying packet */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) if (!dlen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) goto end_copy_pkt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) off = CAS_VAL(RX_COMP1_DATA_OFF, words[0]) + swivel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) hlen = min(cp->page_size - off, dlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) if (hlen < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) "rx page overflow: %d\n", hlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) dev_kfree_skb_irq(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) i = hlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) if (i == dlen) /* attach FCS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) i += cp->crc_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) dma_sync_single_for_cpu(&cp->pdev->dev, page->dma_addr + off,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) i, DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) addr = cas_page_map(page->buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) memcpy(p, addr + off, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) dma_sync_single_for_device(&cp->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) page->dma_addr + off, i,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) cas_page_unmap(addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) if (p == (char *) skb->data) /* not split */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) RX_USED_ADD(page, cp->mtu_stride);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) RX_USED_ADD(page, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) /* any more data? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) if ((words[0] & RX_COMP1_SPLIT_PKT) && ((dlen -= hlen) > 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) p += hlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) dma_sync_single_for_cpu(&cp->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) page->dma_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) dlen + cp->crc_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) addr = cas_page_map(page->buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) memcpy(p, addr, dlen + cp->crc_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) dma_sync_single_for_device(&cp->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) page->dma_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) dlen + cp->crc_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) cas_page_unmap(addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) RX_USED_ADD(page, dlen + cp->crc_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) end_copy_pkt:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) if (cp->crc_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) addr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) crcaddr = skb->data + alloclen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) skb_put(skb, alloclen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) csum = (__force __sum16)htons(CAS_VAL(RX_COMP4_TCP_CSUM, words[3]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) if (cp->crc_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) /* checksum includes FCS. strip it out. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) csum = csum_fold(csum_partial(crcaddr, cp->crc_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) csum_unfold(csum)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) if (addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) cas_page_unmap(addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) skb->protocol = eth_type_trans(skb, cp->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) if (skb->protocol == htons(ETH_P_IP)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) skb->csum = csum_unfold(~csum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) skb->ip_summed = CHECKSUM_COMPLETE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) skb_checksum_none_assert(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) return len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) /* we can handle up to 64 rx flows at a time. we do the same thing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) * as nonreassm except that we batch up the buffers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) * NOTE: we currently just treat each flow as a bunch of packets that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) * we pass up. a better way would be to coalesce the packets
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) * into a jumbo packet. to do that, we need to do the following:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) * 1) the first packet will have a clean split between header and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) * data. save both.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) * 2) each time the next flow packet comes in, extend the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) * data length and merge the checksums.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) * 3) on flow release, fix up the header.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) * 4) make sure the higher layer doesn't care.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) * because packets get coalesced, we shouldn't run into fragment count
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) * issues.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) static inline void cas_rx_flow_pkt(struct cas *cp, const u64 *words,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) int flowid = CAS_VAL(RX_COMP3_FLOWID, words[2]) & (N_RX_FLOWS - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) struct sk_buff_head *flow = &cp->rx_flows[flowid];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) /* this is protected at a higher layer, so no need to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) * do any additional locking here. stick the buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) * at the end.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) __skb_queue_tail(flow, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) if (words[0] & RX_COMP1_RELEASE_FLOW) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) while ((skb = __skb_dequeue(flow))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) cas_skb_release(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) /* put rx descriptor back on ring. if a buffer is in use by a higher
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) * layer, this will need to put in a replacement.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) static void cas_post_page(struct cas *cp, const int ring, const int index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) cas_page_t *new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) int entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) entry = cp->rx_old[ring];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) new = cas_page_swap(cp, ring, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) cp->init_rxds[ring][entry].buffer = cpu_to_le64(new->dma_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) cp->init_rxds[ring][entry].index =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) cpu_to_le64(CAS_BASE(RX_INDEX_NUM, index) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) CAS_BASE(RX_INDEX_RING, ring));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) entry = RX_DESC_ENTRY(ring, entry + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) cp->rx_old[ring] = entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) if (entry % 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) if (ring == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) writel(entry, cp->regs + REG_RX_KICK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) else if ((N_RX_DESC_RINGS > 1) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) (cp->cas_flags & CAS_FLAG_REG_PLUS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) writel(entry, cp->regs + REG_PLUS_RX_KICK1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) /* only when things are bad */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) static int cas_post_rxds_ringN(struct cas *cp, int ring, int num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) unsigned int entry, last, count, released;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) int cluster;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) cas_page_t **page = cp->rx_pages[ring];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) entry = cp->rx_old[ring];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) netif_printk(cp, intr, KERN_DEBUG, cp->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) "rxd[%d] interrupt, done: %d\n", ring, entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) cluster = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) count = entry & 0x3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) last = RX_DESC_ENTRY(ring, num ? entry + num - 4: entry - 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) released = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) while (entry != last) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) /* make a new buffer if it's still in use */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) if (page_count(page[entry]->buffer) > 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) cas_page_t *new = cas_page_dequeue(cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) if (!new) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) /* let the timer know that we need to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) * do this again
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) cp->cas_flags |= CAS_FLAG_RXD_POST(ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) if (!timer_pending(&cp->link_timer))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) mod_timer(&cp->link_timer, jiffies +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) CAS_LINK_FAST_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) cp->rx_old[ring] = entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) cp->rx_last[ring] = num ? num - released : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) spin_lock(&cp->rx_inuse_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) list_add(&page[entry]->list, &cp->rx_inuse_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) spin_unlock(&cp->rx_inuse_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) cp->init_rxds[ring][entry].buffer =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) cpu_to_le64(new->dma_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) page[entry] = new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) if (++count == 4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) cluster = entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) released++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) entry = RX_DESC_ENTRY(ring, entry + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) cp->rx_old[ring] = entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) if (cluster < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) if (ring == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) writel(cluster, cp->regs + REG_RX_KICK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) else if ((N_RX_DESC_RINGS > 1) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) (cp->cas_flags & CAS_FLAG_REG_PLUS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) writel(cluster, cp->regs + REG_PLUS_RX_KICK1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) /* process a completion ring. packets are set up in three basic ways:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) * small packets: should be copied header + data in single buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) * large packets: header and data in a single buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) * split packets: header in a separate buffer from data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) * data may be in multiple pages. data may be > 256
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) * bytes but in a single page.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) * NOTE: RX page posting is done in this routine as well. while there's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) * the capability of using multiple RX completion rings, it isn't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) * really worthwhile due to the fact that the page posting will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) * force serialization on the single descriptor ring.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) static int cas_rx_ringN(struct cas *cp, int ring, int budget)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) struct cas_rx_comp *rxcs = cp->init_rxcs[ring];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) int entry, drops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) int npackets = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) netif_printk(cp, intr, KERN_DEBUG, cp->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) "rx[%d] interrupt, done: %d/%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) ring,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) readl(cp->regs + REG_RX_COMP_HEAD), cp->rx_new[ring]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) entry = cp->rx_new[ring];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) drops = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) struct cas_rx_comp *rxc = rxcs + entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) int type, len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) u64 words[4];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) int i, dring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) words[0] = le64_to_cpu(rxc->word1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) words[1] = le64_to_cpu(rxc->word2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) words[2] = le64_to_cpu(rxc->word3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) words[3] = le64_to_cpu(rxc->word4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) /* don't touch if still owned by hw */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) type = CAS_VAL(RX_COMP1_TYPE, words[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) if (type == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) /* hw hasn't cleared the zero bit yet */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) if (words[3] & RX_COMP4_ZERO) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) /* get info on the packet */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) if (words[3] & (RX_COMP4_LEN_MISMATCH | RX_COMP4_BAD)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) spin_lock(&cp->stat_lock[ring]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) cp->net_stats[ring].rx_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) if (words[3] & RX_COMP4_LEN_MISMATCH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) cp->net_stats[ring].rx_length_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) if (words[3] & RX_COMP4_BAD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) cp->net_stats[ring].rx_crc_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) spin_unlock(&cp->stat_lock[ring]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) /* We'll just return it to Cassini. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) drop_it:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) spin_lock(&cp->stat_lock[ring]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) ++cp->net_stats[ring].rx_dropped;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) spin_unlock(&cp->stat_lock[ring]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) goto next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) len = cas_rx_process_pkt(cp, rxc, entry, words, &skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) if (len < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) ++drops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) goto drop_it;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) /* see if it's a flow re-assembly or not. the driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) * itself handles release back up.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) if (RX_DONT_BATCH || (type == 0x2)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) /* non-reassm: these always get released */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) cas_skb_release(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) cas_rx_flow_pkt(cp, words, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) spin_lock(&cp->stat_lock[ring]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) cp->net_stats[ring].rx_packets++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) cp->net_stats[ring].rx_bytes += len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) spin_unlock(&cp->stat_lock[ring]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) next:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) npackets++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) /* should it be released? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) if (words[0] & RX_COMP1_RELEASE_HDR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) i = CAS_VAL(RX_COMP2_HDR_INDEX, words[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) dring = CAS_VAL(RX_INDEX_RING, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) i = CAS_VAL(RX_INDEX_NUM, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) cas_post_page(cp, dring, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) if (words[0] & RX_COMP1_RELEASE_DATA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) dring = CAS_VAL(RX_INDEX_RING, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) i = CAS_VAL(RX_INDEX_NUM, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) cas_post_page(cp, dring, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) if (words[0] & RX_COMP1_RELEASE_NEXT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) dring = CAS_VAL(RX_INDEX_RING, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) i = CAS_VAL(RX_INDEX_NUM, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) cas_post_page(cp, dring, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) /* skip to the next entry */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) entry = RX_COMP_ENTRY(ring, entry + 1 +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) CAS_VAL(RX_COMP1_SKIP, words[0]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) #ifdef USE_NAPI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) if (budget && (npackets >= budget))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) cp->rx_new[ring] = entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) if (drops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) netdev_info(cp->dev, "Memory squeeze, deferring packet\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) return npackets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) /* put completion entries back on the ring */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) static void cas_post_rxcs_ringN(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) struct cas *cp, int ring)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) struct cas_rx_comp *rxc = cp->init_rxcs[ring];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) int last, entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) last = cp->rx_cur[ring];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) entry = cp->rx_new[ring];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) netif_printk(cp, intr, KERN_DEBUG, dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) "rxc[%d] interrupt, done: %d/%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) ring, readl(cp->regs + REG_RX_COMP_HEAD), entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) /* zero and re-mark descriptors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) while (last != entry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) cas_rxc_init(rxc + last);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) last = RX_COMP_ENTRY(ring, last + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) cp->rx_cur[ring] = last;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) if (ring == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) writel(last, cp->regs + REG_RX_COMP_TAIL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) else if (cp->cas_flags & CAS_FLAG_REG_PLUS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) writel(last, cp->regs + REG_PLUS_RX_COMPN_TAIL(ring));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) /* cassini can use all four PCI interrupts for the completion ring.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) * rings 3 and 4 are identical
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) #if defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) static inline void cas_handle_irqN(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) struct cas *cp, const u32 status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) const int ring)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) if (status & (INTR_RX_COMP_FULL_ALT | INTR_RX_COMP_AF_ALT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) cas_post_rxcs_ringN(dev, cp, ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) static irqreturn_t cas_interruptN(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) struct net_device *dev = dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) struct cas *cp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) int ring = (irq == cp->pci_irq_INTC) ? 2 : 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(ring));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) /* check for shared irq */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) if (status == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) return IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) spin_lock_irqsave(&cp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) if (status & INTR_RX_DONE_ALT) { /* handle rx separately */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) #ifdef USE_NAPI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) cas_mask_intr(cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) napi_schedule(&cp->napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) cas_rx_ringN(cp, ring, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) status &= ~INTR_RX_DONE_ALT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) if (status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) cas_handle_irqN(dev, cp, status, ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) spin_unlock_irqrestore(&cp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) #ifdef USE_PCI_INTB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) /* everything but rx packets */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) static inline void cas_handle_irq1(struct cas *cp, const u32 status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) if (status & INTR_RX_BUF_UNAVAIL_1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) /* Frame arrived, no free RX buffers available.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) * NOTE: we can get this on a link transition. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) cas_post_rxds_ringN(cp, 1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) spin_lock(&cp->stat_lock[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) cp->net_stats[1].rx_dropped++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) spin_unlock(&cp->stat_lock[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) if (status & INTR_RX_BUF_AE_1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) cas_post_rxds_ringN(cp, 1, RX_DESC_RINGN_SIZE(1) -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) RX_AE_FREEN_VAL(1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) if (status & (INTR_RX_COMP_AF | INTR_RX_COMP_FULL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) cas_post_rxcs_ringN(cp, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) /* ring 2 handles a few more events than 3 and 4 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) static irqreturn_t cas_interrupt1(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) struct net_device *dev = dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) struct cas *cp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) /* check for shared interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) if (status == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) return IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) spin_lock_irqsave(&cp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) if (status & INTR_RX_DONE_ALT) { /* handle rx separately */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) #ifdef USE_NAPI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) cas_mask_intr(cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) napi_schedule(&cp->napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) cas_rx_ringN(cp, 1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) status &= ~INTR_RX_DONE_ALT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) if (status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) cas_handle_irq1(cp, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) spin_unlock_irqrestore(&cp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) static inline void cas_handle_irq(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) struct cas *cp, const u32 status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) /* housekeeping interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) if (status & INTR_ERROR_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) cas_abnormal_irq(dev, cp, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) if (status & INTR_RX_BUF_UNAVAIL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) /* Frame arrived, no free RX buffers available.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) * NOTE: we can get this on a link transition.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) cas_post_rxds_ringN(cp, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) spin_lock(&cp->stat_lock[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) cp->net_stats[0].rx_dropped++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) spin_unlock(&cp->stat_lock[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) } else if (status & INTR_RX_BUF_AE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) cas_post_rxds_ringN(cp, 0, RX_DESC_RINGN_SIZE(0) -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) RX_AE_FREEN_VAL(0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) if (status & (INTR_RX_COMP_AF | INTR_RX_COMP_FULL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) cas_post_rxcs_ringN(dev, cp, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) static irqreturn_t cas_interrupt(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) struct net_device *dev = dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) struct cas *cp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) u32 status = readl(cp->regs + REG_INTR_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) if (status == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) return IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) spin_lock_irqsave(&cp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) if (status & (INTR_TX_ALL | INTR_TX_INTME)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) cas_tx(dev, cp, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) status &= ~(INTR_TX_ALL | INTR_TX_INTME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) if (status & INTR_RX_DONE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) #ifdef USE_NAPI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) cas_mask_intr(cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) napi_schedule(&cp->napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) cas_rx_ringN(cp, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) status &= ~INTR_RX_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) if (status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) cas_handle_irq(dev, cp, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) spin_unlock_irqrestore(&cp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) #ifdef USE_NAPI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) static int cas_poll(struct napi_struct *napi, int budget)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) struct cas *cp = container_of(napi, struct cas, napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) struct net_device *dev = cp->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) int i, enable_intr, credits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) u32 status = readl(cp->regs + REG_INTR_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) spin_lock_irqsave(&cp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) cas_tx(dev, cp, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) spin_unlock_irqrestore(&cp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) /* NAPI rx packets. we spread the credits across all of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) * rxc rings
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) * to make sure we're fair with the work we loop through each
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) * ring N_RX_COMP_RING times with a request of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) * budget / N_RX_COMP_RINGS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) enable_intr = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) credits = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) for (i = 0; i < N_RX_COMP_RINGS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) int j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) for (j = 0; j < N_RX_COMP_RINGS; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) credits += cas_rx_ringN(cp, j, budget / N_RX_COMP_RINGS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) if (credits >= budget) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) enable_intr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) goto rx_comp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) rx_comp:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) /* final rx completion */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) spin_lock_irqsave(&cp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) if (status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) cas_handle_irq(dev, cp, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) #ifdef USE_PCI_INTB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) if (N_RX_COMP_RINGS > 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) status = readl(cp->regs + REG_PLUS_INTRN_STATUS(1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) if (status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) cas_handle_irq1(dev, cp, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) #ifdef USE_PCI_INTC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) if (N_RX_COMP_RINGS > 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) status = readl(cp->regs + REG_PLUS_INTRN_STATUS(2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) if (status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) cas_handle_irqN(dev, cp, status, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) #ifdef USE_PCI_INTD
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) if (N_RX_COMP_RINGS > 3) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) status = readl(cp->regs + REG_PLUS_INTRN_STATUS(3));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) if (status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) cas_handle_irqN(dev, cp, status, 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) spin_unlock_irqrestore(&cp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) if (enable_intr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) napi_complete(napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) cas_unmask_intr(cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) return credits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) #ifdef CONFIG_NET_POLL_CONTROLLER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) static void cas_netpoll(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) struct cas *cp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) cas_disable_irq(cp, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) cas_interrupt(cp->pdev->irq, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) cas_enable_irq(cp, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) #ifdef USE_PCI_INTB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) if (N_RX_COMP_RINGS > 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) /* cas_interrupt1(); */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) #ifdef USE_PCI_INTC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) if (N_RX_COMP_RINGS > 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) /* cas_interruptN(); */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) #ifdef USE_PCI_INTD
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) if (N_RX_COMP_RINGS > 3) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) /* cas_interruptN(); */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) static void cas_tx_timeout(struct net_device *dev, unsigned int txqueue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) struct cas *cp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) netdev_err(dev, "transmit timed out, resetting\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) if (!cp->hw_running) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) netdev_err(dev, "hrm.. hw not running!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) netdev_err(dev, "MIF_STATE[%08x]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) readl(cp->regs + REG_MIF_STATE_MACHINE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) netdev_err(dev, "MAC_STATE[%08x]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) readl(cp->regs + REG_MAC_STATE_MACHINE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) netdev_err(dev, "TX_STATE[%08x:%08x:%08x] FIFO[%08x:%08x:%08x] SM1[%08x] SM2[%08x]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) readl(cp->regs + REG_TX_CFG),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) readl(cp->regs + REG_MAC_TX_STATUS),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) readl(cp->regs + REG_MAC_TX_CFG),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) readl(cp->regs + REG_TX_FIFO_PKT_CNT),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) readl(cp->regs + REG_TX_FIFO_WRITE_PTR),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) readl(cp->regs + REG_TX_FIFO_READ_PTR),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) readl(cp->regs + REG_TX_SM_1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) readl(cp->regs + REG_TX_SM_2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) netdev_err(dev, "RX_STATE[%08x:%08x:%08x]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) readl(cp->regs + REG_RX_CFG),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) readl(cp->regs + REG_MAC_RX_STATUS),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) readl(cp->regs + REG_MAC_RX_CFG));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) netdev_err(dev, "HP_STATE[%08x:%08x:%08x:%08x]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) readl(cp->regs + REG_HP_STATE_MACHINE),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) readl(cp->regs + REG_HP_STATUS0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) readl(cp->regs + REG_HP_STATUS1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) readl(cp->regs + REG_HP_STATUS2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) #if 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) atomic_inc(&cp->reset_task_pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) atomic_inc(&cp->reset_task_pending_all);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) schedule_work(&cp->reset_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) schedule_work(&cp->reset_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) static inline int cas_intme(int ring, int entry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) /* Algorithm: IRQ every 1/2 of descriptors. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) if (!(entry & ((TX_DESC_RINGN_SIZE(ring) >> 1) - 1)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) static void cas_write_txd(struct cas *cp, int ring, int entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) dma_addr_t mapping, int len, u64 ctrl, int last)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) struct cas_tx_desc *txd = cp->init_txds[ring] + entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) ctrl |= CAS_BASE(TX_DESC_BUFLEN, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) if (cas_intme(ring, entry))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) ctrl |= TX_DESC_INTME;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) if (last)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) ctrl |= TX_DESC_EOF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) txd->control = cpu_to_le64(ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) txd->buffer = cpu_to_le64(mapping);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) static inline void *tx_tiny_buf(struct cas *cp, const int ring,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) const int entry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) return cp->tx_tiny_bufs[ring] + TX_TINY_BUF_LEN*entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) static inline dma_addr_t tx_tiny_map(struct cas *cp, const int ring,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) const int entry, const int tentry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) cp->tx_tiny_use[ring][tentry].nbufs++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) cp->tx_tiny_use[ring][entry].used = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) return cp->tx_tiny_dvma[ring] + TX_TINY_BUF_LEN*entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) static inline int cas_xmit_tx_ringN(struct cas *cp, int ring,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) struct net_device *dev = cp->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) int entry, nr_frags, frag, tabort, tentry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) dma_addr_t mapping;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) u64 ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) u32 len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) spin_lock_irqsave(&cp->tx_lock[ring], flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) /* This is a hard error, log it. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) if (TX_BUFFS_AVAIL(cp, ring) <=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) CAS_TABORT(cp)*(skb_shinfo(skb)->nr_frags + 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) netif_stop_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) spin_unlock_irqrestore(&cp->tx_lock[ring], flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) ctrl = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) if (skb->ip_summed == CHECKSUM_PARTIAL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) const u64 csum_start_off = skb_checksum_start_offset(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) const u64 csum_stuff_off = csum_start_off + skb->csum_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) ctrl = TX_DESC_CSUM_EN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) CAS_BASE(TX_DESC_CSUM_START, csum_start_off) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) CAS_BASE(TX_DESC_CSUM_STUFF, csum_stuff_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) entry = cp->tx_new[ring];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) cp->tx_skbs[ring][entry] = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) nr_frags = skb_shinfo(skb)->nr_frags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) len = skb_headlen(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) mapping = dma_map_page(&cp->pdev->dev, virt_to_page(skb->data),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) offset_in_page(skb->data), len, DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) tentry = entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) tabort = cas_calc_tabort(cp, (unsigned long) skb->data, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) if (unlikely(tabort)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) /* NOTE: len is always > tabort */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) cas_write_txd(cp, ring, entry, mapping, len - tabort,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) ctrl | TX_DESC_SOF, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) entry = TX_DESC_NEXT(ring, entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) skb_copy_from_linear_data_offset(skb, len - tabort,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) tx_tiny_buf(cp, ring, entry), tabort);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) mapping = tx_tiny_map(cp, ring, entry, tentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) cas_write_txd(cp, ring, entry, mapping, tabort, ctrl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) (nr_frags == 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) cas_write_txd(cp, ring, entry, mapping, len, ctrl |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) TX_DESC_SOF, (nr_frags == 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) entry = TX_DESC_NEXT(ring, entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) for (frag = 0; frag < nr_frags; frag++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) const skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) len = skb_frag_size(fragp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) mapping = skb_frag_dma_map(&cp->pdev->dev, fragp, 0, len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) tabort = cas_calc_tabort(cp, skb_frag_off(fragp), len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) if (unlikely(tabort)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) void *addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) /* NOTE: len is always > tabort */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) cas_write_txd(cp, ring, entry, mapping, len - tabort,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) ctrl, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) entry = TX_DESC_NEXT(ring, entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) addr = cas_page_map(skb_frag_page(fragp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) memcpy(tx_tiny_buf(cp, ring, entry),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) addr + skb_frag_off(fragp) + len - tabort,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) tabort);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) cas_page_unmap(addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) mapping = tx_tiny_map(cp, ring, entry, tentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) len = tabort;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) cas_write_txd(cp, ring, entry, mapping, len, ctrl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) (frag + 1 == nr_frags));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) entry = TX_DESC_NEXT(ring, entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) cp->tx_new[ring] = entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) if (TX_BUFFS_AVAIL(cp, ring) <= CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) netif_stop_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) netif_printk(cp, tx_queued, KERN_DEBUG, dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) "tx[%d] queued, slot %d, skblen %d, avail %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) ring, entry, skb->len, TX_BUFFS_AVAIL(cp, ring));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) writel(entry, cp->regs + REG_TX_KICKN(ring));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) spin_unlock_irqrestore(&cp->tx_lock[ring], flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) static netdev_tx_t cas_start_xmit(struct sk_buff *skb, struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) struct cas *cp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) /* this is only used as a load-balancing hint, so it doesn't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) * need to be SMP safe
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) static int ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) if (skb_padto(skb, cp->min_frame_size))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) return NETDEV_TX_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) /* XXX: we need some higher-level QoS hooks to steer packets to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) * individual queues.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) if (cas_xmit_tx_ringN(cp, ring++ & N_TX_RINGS_MASK, skb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) return NETDEV_TX_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) return NETDEV_TX_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) static void cas_init_tx_dma(struct cas *cp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) u64 desc_dma = cp->block_dvma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) unsigned long off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) /* set up tx completion writeback registers. must be 8-byte aligned */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) #ifdef USE_TX_COMPWB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) off = offsetof(struct cas_init_block, tx_compwb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) writel((desc_dma + off) >> 32, cp->regs + REG_TX_COMPWB_DB_HI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) writel((desc_dma + off) & 0xffffffff, cp->regs + REG_TX_COMPWB_DB_LOW);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) /* enable completion writebacks, enable paced mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) * disable read pipe, and disable pre-interrupt compwbs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) val = TX_CFG_COMPWB_Q1 | TX_CFG_COMPWB_Q2 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) TX_CFG_COMPWB_Q3 | TX_CFG_COMPWB_Q4 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) TX_CFG_DMA_RDPIPE_DIS | TX_CFG_PACED_MODE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) TX_CFG_INTR_COMPWB_DIS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) /* write out tx ring info and tx desc bases */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) for (i = 0; i < MAX_TX_RINGS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) off = (unsigned long) cp->init_txds[i] -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) (unsigned long) cp->init_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) val |= CAS_TX_RINGN_BASE(i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) writel((desc_dma + off) >> 32, cp->regs + REG_TX_DBN_HI(i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) writel((desc_dma + off) & 0xffffffff, cp->regs +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) REG_TX_DBN_LOW(i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) /* don't zero out the kick register here as the system
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) * will wedge
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) writel(val, cp->regs + REG_TX_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) /* program max burst sizes. these numbers should be different
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) * if doing QoS.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) #ifdef USE_QOS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) writel(0x800, cp->regs + REG_TX_MAXBURST_0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) writel(0x1600, cp->regs + REG_TX_MAXBURST_1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) writel(0x2400, cp->regs + REG_TX_MAXBURST_2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) writel(0x4800, cp->regs + REG_TX_MAXBURST_3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) writel(0x800, cp->regs + REG_TX_MAXBURST_0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) writel(0x800, cp->regs + REG_TX_MAXBURST_1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) writel(0x800, cp->regs + REG_TX_MAXBURST_2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) writel(0x800, cp->regs + REG_TX_MAXBURST_3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) /* Must be invoked under cp->lock. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) static inline void cas_init_dma(struct cas *cp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) cas_init_tx_dma(cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) cas_init_rx_dma(cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) static void cas_process_mc_list(struct cas *cp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922) u16 hash_table[16];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) u32 crc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) struct netdev_hw_addr *ha;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) int i = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927) memset(hash_table, 0, sizeof(hash_table));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928) netdev_for_each_mc_addr(ha, cp->dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929) if (i <= CAS_MC_EXACT_MATCH_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) /* use the alternate mac address registers for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) * first 15 multicast addresses
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) writel((ha->addr[4] << 8) | ha->addr[5],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) cp->regs + REG_MAC_ADDRN(i*3 + 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) writel((ha->addr[2] << 8) | ha->addr[3],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) cp->regs + REG_MAC_ADDRN(i*3 + 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) writel((ha->addr[0] << 8) | ha->addr[1],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) cp->regs + REG_MAC_ADDRN(i*3 + 2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) i++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942) /* use hw hash table for the next series of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) * multicast addresses
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) crc = ether_crc_le(ETH_ALEN, ha->addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946) crc >>= 24;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947) hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950) for (i = 0; i < 16; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) writel(hash_table[i], cp->regs + REG_MAC_HASH_TABLEN(i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) /* Must be invoked under cp->lock. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) static u32 cas_setup_multicast(struct cas *cp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) u32 rxcfg = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) if (cp->dev->flags & IFF_PROMISC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) rxcfg |= MAC_RX_CFG_PROMISC_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) } else if (cp->dev->flags & IFF_ALLMULTI) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964) for (i=0; i < 16; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) writel(0xFFFF, cp->regs + REG_MAC_HASH_TABLEN(i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) rxcfg |= MAC_RX_CFG_HASH_FILTER_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) cas_process_mc_list(cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) rxcfg |= MAC_RX_CFG_HASH_FILTER_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) return rxcfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) /* must be invoked under cp->stat_lock[N_TX_RINGS] */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) static void cas_clear_mac_err(struct cas *cp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979) writel(0, cp->regs + REG_MAC_COLL_NORMAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980) writel(0, cp->regs + REG_MAC_COLL_FIRST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981) writel(0, cp->regs + REG_MAC_COLL_EXCESS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982) writel(0, cp->regs + REG_MAC_COLL_LATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) writel(0, cp->regs + REG_MAC_TIMER_DEFER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) writel(0, cp->regs + REG_MAC_ATTEMPTS_PEAK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) writel(0, cp->regs + REG_MAC_RECV_FRAME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) writel(0, cp->regs + REG_MAC_LEN_ERR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987) writel(0, cp->regs + REG_MAC_ALIGN_ERR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988) writel(0, cp->regs + REG_MAC_FCS_ERR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989) writel(0, cp->regs + REG_MAC_RX_CODE_ERR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993) static void cas_mac_reset(struct cas *cp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997) /* do both TX and RX reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998) writel(0x1, cp->regs + REG_MAC_TX_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999) writel(0x1, cp->regs + REG_MAC_RX_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001) /* wait for TX */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002) i = STOP_TRIES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003) while (i-- > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004) if (readl(cp->regs + REG_MAC_TX_RESET) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006) udelay(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009) /* wait for RX */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010) i = STOP_TRIES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011) while (i-- > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012) if (readl(cp->regs + REG_MAC_RX_RESET) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014) udelay(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017) if (readl(cp->regs + REG_MAC_TX_RESET) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) readl(cp->regs + REG_MAC_RX_RESET))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019) netdev_err(cp->dev, "mac tx[%d]/rx[%d] reset failed [%08x]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020) readl(cp->regs + REG_MAC_TX_RESET),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021) readl(cp->regs + REG_MAC_RX_RESET),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022) readl(cp->regs + REG_MAC_STATE_MACHINE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026) /* Must be invoked under cp->lock. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027) static void cas_init_mac(struct cas *cp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029) unsigned char *e = &cp->dev->dev_addr[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031) cas_mac_reset(cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033) /* setup core arbitration weight register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034) writel(CAWR_RR_DIS, cp->regs + REG_CAWR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036) #if !defined(CONFIG_SPARC64) && !defined(CONFIG_ALPHA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037) /* set the infinite burst register for chips that don't have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038) * pci issues.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040) if ((cp->cas_flags & CAS_FLAG_TARGET_ABORT) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041) writel(INF_BURST_EN, cp->regs + REG_INF_BURST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044) writel(0x1BF0, cp->regs + REG_MAC_SEND_PAUSE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046) writel(0x00, cp->regs + REG_MAC_IPG0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047) writel(0x08, cp->regs + REG_MAC_IPG1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048) writel(0x04, cp->regs + REG_MAC_IPG2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050) /* change later for 802.3z */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051) writel(0x40, cp->regs + REG_MAC_SLOT_TIME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053) /* min frame + FCS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054) writel(ETH_ZLEN + 4, cp->regs + REG_MAC_FRAMESIZE_MIN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056) /* Ethernet payload + header + FCS + optional VLAN tag. NOTE: we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057) * specify the maximum frame size to prevent RX tag errors on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058) * oversized frames.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060) writel(CAS_BASE(MAC_FRAMESIZE_MAX_BURST, 0x2000) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061) CAS_BASE(MAC_FRAMESIZE_MAX_FRAME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062) (CAS_MAX_MTU + ETH_HLEN + 4 + 4)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063) cp->regs + REG_MAC_FRAMESIZE_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065) /* NOTE: crc_size is used as a surrogate for half-duplex.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066) * workaround saturn half-duplex issue by increasing preamble
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067) * size to 65 bytes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069) if ((cp->cas_flags & CAS_FLAG_SATURN) && cp->crc_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070) writel(0x41, cp->regs + REG_MAC_PA_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072) writel(0x07, cp->regs + REG_MAC_PA_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073) writel(0x04, cp->regs + REG_MAC_JAM_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074) writel(0x10, cp->regs + REG_MAC_ATTEMPT_LIMIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075) writel(0x8808, cp->regs + REG_MAC_CTRL_TYPE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077) writel((e[5] | (e[4] << 8)) & 0x3ff, cp->regs + REG_MAC_RANDOM_SEED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079) writel(0, cp->regs + REG_MAC_ADDR_FILTER0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080) writel(0, cp->regs + REG_MAC_ADDR_FILTER1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081) writel(0, cp->regs + REG_MAC_ADDR_FILTER2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082) writel(0, cp->regs + REG_MAC_ADDR_FILTER2_1_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083) writel(0, cp->regs + REG_MAC_ADDR_FILTER0_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085) /* setup mac address in perfect filter array */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) for (i = 0; i < 45; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087) writel(0x0, cp->regs + REG_MAC_ADDRN(i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089) writel((e[4] << 8) | e[5], cp->regs + REG_MAC_ADDRN(0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090) writel((e[2] << 8) | e[3], cp->regs + REG_MAC_ADDRN(1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091) writel((e[0] << 8) | e[1], cp->regs + REG_MAC_ADDRN(2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093) writel(0x0001, cp->regs + REG_MAC_ADDRN(42));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094) writel(0xc200, cp->regs + REG_MAC_ADDRN(43));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095) writel(0x0180, cp->regs + REG_MAC_ADDRN(44));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097) cp->mac_rx_cfg = cas_setup_multicast(cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099) spin_lock(&cp->stat_lock[N_TX_RINGS]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100) cas_clear_mac_err(cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101) spin_unlock(&cp->stat_lock[N_TX_RINGS]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103) /* Setup MAC interrupts. We want to get all of the interesting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104) * counter expiration events, but we do not want to hear about
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105) * normal rx/tx as the DMA engine tells us that.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107) writel(MAC_TX_FRAME_XMIT, cp->regs + REG_MAC_TX_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108) writel(MAC_RX_FRAME_RECV, cp->regs + REG_MAC_RX_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110) /* Don't enable even the PAUSE interrupts for now, we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111) * make no use of those events other than to record them.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113) writel(0xffffffff, cp->regs + REG_MAC_CTRL_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116) /* Must be invoked under cp->lock. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117) static void cas_init_pause_thresholds(struct cas *cp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119) /* Calculate pause thresholds. Setting the OFF threshold to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120) * full RX fifo size effectively disables PAUSE generation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122) if (cp->rx_fifo_size <= (2 * 1024)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123) cp->rx_pause_off = cp->rx_pause_on = cp->rx_fifo_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125) int max_frame = (cp->dev->mtu + ETH_HLEN + 4 + 4 + 64) & ~63;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126) if (max_frame * 3 > cp->rx_fifo_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3127) cp->rx_pause_off = 7104;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128) cp->rx_pause_on = 960;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130) int off = (cp->rx_fifo_size - (max_frame * 2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131) int on = off - max_frame;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132) cp->rx_pause_off = off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3133) cp->rx_pause_on = on;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3134) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138) static int cas_vpd_match(const void __iomem *p, const char *str)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140) int len = strlen(str) + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143) for (i = 0; i < len; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144) if (readb(p + i) != str[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151) /* get the mac address by reading the vpd information in the rom.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152) * also get the phy type and determine if there's an entropy generator.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153) * NOTE: this is a bit convoluted for the following reasons:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154) * 1) vpd info has order-dependent mac addresses for multinic cards
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155) * 2) the only way to determine the nic order is to use the slot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156) * number.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157) * 3) fiber cards don't have bridges, so their slot numbers don't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158) * mean anything.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159) * 4) we don't actually know we have a fiber card until after
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160) * the mac addresses are parsed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162) static int cas_get_vpd_info(struct cas *cp, unsigned char *dev_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163) const int offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165) void __iomem *p = cp->regs + REG_EXPANSION_ROM_RUN_START;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166) void __iomem *base, *kstart;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3167) int i, len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168) int found = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169) #define VPD_FOUND_MAC 0x01
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170) #define VPD_FOUND_PHY 0x02
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172) int phy_type = CAS_PHY_MII_MDIO0; /* default phy type */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173) int mac_off = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175) #if defined(CONFIG_SPARC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176) const unsigned char *addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179) /* give us access to the PROM */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3180) writel(BIM_LOCAL_DEV_PROM | BIM_LOCAL_DEV_PAD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181) cp->regs + REG_BIM_LOCAL_DEV_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3183) /* check for an expansion rom */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184) if (readb(p) != 0x55 || readb(p + 1) != 0xaa)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185) goto use_random_mac_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3187) /* search for beginning of vpd */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3188) base = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3189) for (i = 2; i < EXPANSION_ROM_SIZE; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3190) /* check for PCIR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191) if ((readb(p + i + 0) == 0x50) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3192) (readb(p + i + 1) == 0x43) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3193) (readb(p + i + 2) == 0x49) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3194) (readb(p + i + 3) == 0x52)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3195) base = p + (readb(p + i + 8) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3196) (readb(p + i + 9) << 8));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3197) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3198) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3199) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3201) if (!base || (readb(base) != 0x82))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3202) goto use_random_mac_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3204) i = (readb(base + 1) | (readb(base + 2) << 8)) + 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3205) while (i < EXPANSION_ROM_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3206) if (readb(base + i) != 0x90) /* no vpd found */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3207) goto use_random_mac_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3209) /* found a vpd field */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3210) len = readb(base + i + 1) | (readb(base + i + 2) << 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3212) /* extract keywords */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3213) kstart = base + i + 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3214) p = kstart;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3215) while ((p - kstart) < len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3216) int klen = readb(p + 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3217) int j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3218) char type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3220) p += 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3222) /* look for the following things:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3223) * -- correct length == 29
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3224) * 3 (type) + 2 (size) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3225) * 18 (strlen("local-mac-address") + 1) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3226) * 6 (mac addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3227) * -- VPD Instance 'I'
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3228) * -- VPD Type Bytes 'B'
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3229) * -- VPD data length == 6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230) * -- property string == local-mac-address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3231) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3232) * -- correct length == 24
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3233) * 3 (type) + 2 (size) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3234) * 12 (strlen("entropy-dev") + 1) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3235) * 7 (strlen("vms110") + 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3236) * -- VPD Instance 'I'
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3237) * -- VPD Type String 'B'
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3238) * -- VPD data length == 7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3239) * -- property string == entropy-dev
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3240) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3241) * -- correct length == 18
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3242) * 3 (type) + 2 (size) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3243) * 9 (strlen("phy-type") + 1) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3244) * 4 (strlen("pcs") + 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3245) * -- VPD Instance 'I'
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3246) * -- VPD Type String 'S'
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3247) * -- VPD data length == 4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3248) * -- property string == phy-type
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3249) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3250) * -- correct length == 23
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3251) * 3 (type) + 2 (size) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3252) * 14 (strlen("phy-interface") + 1) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3253) * 4 (strlen("pcs") + 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3254) * -- VPD Instance 'I'
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3255) * -- VPD Type String 'S'
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3256) * -- VPD data length == 4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3257) * -- property string == phy-interface
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3258) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3259) if (readb(p) != 'I')
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3260) goto next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3261)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3262) /* finally, check string and length */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3263) type = readb(p + 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3264) if (type == 'B') {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3265) if ((klen == 29) && readb(p + 4) == 6 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3266) cas_vpd_match(p + 5,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3267) "local-mac-address")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3268) if (mac_off++ > offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3269) goto next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3270)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3271) /* set mac address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3272) for (j = 0; j < 6; j++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3273) dev_addr[j] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3274) readb(p + 23 + j);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3275) goto found_mac;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3276) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3277) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3278)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3279) if (type != 'S')
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3280) goto next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3282) #ifdef USE_ENTROPY_DEV
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3283) if ((klen == 24) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3284) cas_vpd_match(p + 5, "entropy-dev") &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3285) cas_vpd_match(p + 17, "vms110")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3286) cp->cas_flags |= CAS_FLAG_ENTROPY_DEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3287) goto next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3288) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3289) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3291) if (found & VPD_FOUND_PHY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3292) goto next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3294) if ((klen == 18) && readb(p + 4) == 4 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3295) cas_vpd_match(p + 5, "phy-type")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3296) if (cas_vpd_match(p + 14, "pcs")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3297) phy_type = CAS_PHY_SERDES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3298) goto found_phy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3299) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3300) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3302) if ((klen == 23) && readb(p + 4) == 4 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3303) cas_vpd_match(p + 5, "phy-interface")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3304) if (cas_vpd_match(p + 19, "pcs")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3305) phy_type = CAS_PHY_SERDES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3306) goto found_phy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3307) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3308) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3309) found_mac:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3310) found |= VPD_FOUND_MAC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3311) goto next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3312)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3313) found_phy:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3314) found |= VPD_FOUND_PHY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3315)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3316) next:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3317) p += klen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3318) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3319) i += len + 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3320) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3322) use_random_mac_addr:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3323) if (found & VPD_FOUND_MAC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3324) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3325)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3326) #if defined(CONFIG_SPARC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3327) addr = of_get_property(cp->of_node, "local-mac-address", NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3328) if (addr != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3329) memcpy(dev_addr, addr, ETH_ALEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3330) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3331) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3332) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3333)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3334) /* Sun MAC prefix then 3 random bytes. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3335) pr_info("MAC address not found in ROM VPD\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3336) dev_addr[0] = 0x08;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3337) dev_addr[1] = 0x00;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3338) dev_addr[2] = 0x20;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3339) get_random_bytes(dev_addr + 3, 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3340)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3341) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3342) writel(0, cp->regs + REG_BIM_LOCAL_DEV_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3343) return phy_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3344) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3345)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3346) /* check pci invariants */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3347) static void cas_check_pci_invariants(struct cas *cp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3348) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3349) struct pci_dev *pdev = cp->pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3350)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3351) cp->cas_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3352) if ((pdev->vendor == PCI_VENDOR_ID_SUN) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3353) (pdev->device == PCI_DEVICE_ID_SUN_CASSINI)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3354) if (pdev->revision >= CAS_ID_REVPLUS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3355) cp->cas_flags |= CAS_FLAG_REG_PLUS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3356) if (pdev->revision < CAS_ID_REVPLUS02u)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3357) cp->cas_flags |= CAS_FLAG_TARGET_ABORT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3358)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3359) /* Original Cassini supports HW CSUM, but it's not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3360) * enabled by default as it can trigger TX hangs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3361) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3362) if (pdev->revision < CAS_ID_REV2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3363) cp->cas_flags |= CAS_FLAG_NO_HW_CSUM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3364) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3365) /* Only sun has original cassini chips. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3366) cp->cas_flags |= CAS_FLAG_REG_PLUS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3367)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3368) /* We use a flag because the same phy might be externally
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3369) * connected.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3370) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3371) if ((pdev->vendor == PCI_VENDOR_ID_NS) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3372) (pdev->device == PCI_DEVICE_ID_NS_SATURN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3373) cp->cas_flags |= CAS_FLAG_SATURN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3374) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3375) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3377)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3378) static int cas_check_invariants(struct cas *cp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3379) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3380) struct pci_dev *pdev = cp->pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3381) u32 cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3382) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3383)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3384) /* get page size for rx buffers. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3385) cp->page_order = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3386) #ifdef USE_PAGE_ORDER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3387) if (PAGE_SHIFT < CAS_JUMBO_PAGE_SHIFT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3388) /* see if we can allocate larger pages */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3389) struct page *page = alloc_pages(GFP_ATOMIC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3390) CAS_JUMBO_PAGE_SHIFT -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3391) PAGE_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3392) if (page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3393) __free_pages(page, CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3394) cp->page_order = CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3395) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3396) printk("MTU limited to %d bytes\n", CAS_MAX_MTU);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3397) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3398) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3399) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3400) cp->page_size = (PAGE_SIZE << cp->page_order);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3401)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3402) /* Fetch the FIFO configurations. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3403) cp->tx_fifo_size = readl(cp->regs + REG_TX_FIFO_SIZE) * 64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3404) cp->rx_fifo_size = RX_FIFO_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3405)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3406) /* finish phy determination. MDIO1 takes precedence over MDIO0 if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3407) * they're both connected.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3408) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3409) cp->phy_type = cas_get_vpd_info(cp, cp->dev->dev_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3410) PCI_SLOT(pdev->devfn));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3411) if (cp->phy_type & CAS_PHY_SERDES) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3412) cp->cas_flags |= CAS_FLAG_1000MB_CAP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3413) return 0; /* no more checking needed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3414) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3415)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3416) /* MII */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3417) cfg = readl(cp->regs + REG_MIF_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3418) if (cfg & MIF_CFG_MDIO_1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3419) cp->phy_type = CAS_PHY_MII_MDIO1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3420) } else if (cfg & MIF_CFG_MDIO_0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3421) cp->phy_type = CAS_PHY_MII_MDIO0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3422) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3423)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3424) cas_mif_poll(cp, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3425) writel(PCS_DATAPATH_MODE_MII, cp->regs + REG_PCS_DATAPATH_MODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3426)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3427) for (i = 0; i < 32; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3428) u32 phy_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3429) int j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3430)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3431) for (j = 0; j < 3; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3432) cp->phy_addr = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3433) phy_id = cas_phy_read(cp, MII_PHYSID1) << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3434) phy_id |= cas_phy_read(cp, MII_PHYSID2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3435) if (phy_id && (phy_id != 0xFFFFFFFF)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3436) cp->phy_id = phy_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3437) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3438) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3439) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3440) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3441) pr_err("MII phy did not respond [%08x]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3442) readl(cp->regs + REG_MIF_STATE_MACHINE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3443) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3444)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3445) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3446) /* see if we can do gigabit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3447) cfg = cas_phy_read(cp, MII_BMSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3448) if ((cfg & CAS_BMSR_1000_EXTEND) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3449) cas_phy_read(cp, CAS_MII_1000_EXTEND))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3450) cp->cas_flags |= CAS_FLAG_1000MB_CAP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3451) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3452) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3453)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3454) /* Must be invoked under cp->lock. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3455) static inline void cas_start_dma(struct cas *cp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3456) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3457) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3458) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3459) int txfailed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3460)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3461) /* enable dma */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3462) val = readl(cp->regs + REG_TX_CFG) | TX_CFG_DMA_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3463) writel(val, cp->regs + REG_TX_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3464) val = readl(cp->regs + REG_RX_CFG) | RX_CFG_DMA_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3465) writel(val, cp->regs + REG_RX_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3466)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3467) /* enable the mac */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3468) val = readl(cp->regs + REG_MAC_TX_CFG) | MAC_TX_CFG_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3469) writel(val, cp->regs + REG_MAC_TX_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3470) val = readl(cp->regs + REG_MAC_RX_CFG) | MAC_RX_CFG_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3471) writel(val, cp->regs + REG_MAC_RX_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3472)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3473) i = STOP_TRIES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3474) while (i-- > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3475) val = readl(cp->regs + REG_MAC_TX_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3476) if ((val & MAC_TX_CFG_EN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3477) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3478) udelay(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3479) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3480) if (i < 0) txfailed = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3481) i = STOP_TRIES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3482) while (i-- > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3483) val = readl(cp->regs + REG_MAC_RX_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3484) if ((val & MAC_RX_CFG_EN)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3485) if (txfailed) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3486) netdev_err(cp->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3487) "enabling mac failed [tx:%08x:%08x]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3488) readl(cp->regs + REG_MIF_STATE_MACHINE),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3489) readl(cp->regs + REG_MAC_STATE_MACHINE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3490) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3491) goto enable_rx_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3492) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3493) udelay(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3494) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3495) netdev_err(cp->dev, "enabling mac failed [%s:%08x:%08x]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3496) (txfailed ? "tx,rx" : "rx"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3497) readl(cp->regs + REG_MIF_STATE_MACHINE),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3498) readl(cp->regs + REG_MAC_STATE_MACHINE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3499)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3500) enable_rx_done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3501) cas_unmask_intr(cp); /* enable interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3502) writel(RX_DESC_RINGN_SIZE(0) - 4, cp->regs + REG_RX_KICK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3503) writel(0, cp->regs + REG_RX_COMP_TAIL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3504)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3505) if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3506) if (N_RX_DESC_RINGS > 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3507) writel(RX_DESC_RINGN_SIZE(1) - 4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3508) cp->regs + REG_PLUS_RX_KICK1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3509)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3510) for (i = 1; i < N_RX_COMP_RINGS; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3511) writel(0, cp->regs + REG_PLUS_RX_COMPN_TAIL(i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3512) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3513) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3514)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3515) /* Must be invoked under cp->lock. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3516) static void cas_read_pcs_link_mode(struct cas *cp, int *fd, int *spd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3517) int *pause)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3518) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3519) u32 val = readl(cp->regs + REG_PCS_MII_LPA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3520) *fd = (val & PCS_MII_LPA_FD) ? 1 : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3521) *pause = (val & PCS_MII_LPA_SYM_PAUSE) ? 0x01 : 0x00;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3522) if (val & PCS_MII_LPA_ASYM_PAUSE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3523) *pause |= 0x10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3524) *spd = 1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3525) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3526)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3527) /* Must be invoked under cp->lock. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3528) static void cas_read_mii_link_mode(struct cas *cp, int *fd, int *spd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3529) int *pause)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3530) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3531) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3532)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3533) *fd = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3534) *spd = 10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3535) *pause = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3536)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3537) /* use GMII registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3538) val = cas_phy_read(cp, MII_LPA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3539) if (val & CAS_LPA_PAUSE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3540) *pause = 0x01;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3541)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3542) if (val & CAS_LPA_ASYM_PAUSE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3543) *pause |= 0x10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3544)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3545) if (val & LPA_DUPLEX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3546) *fd = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3547) if (val & LPA_100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3548) *spd = 100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3549)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3550) if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3551) val = cas_phy_read(cp, CAS_MII_1000_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3552) if (val & (CAS_LPA_1000FULL | CAS_LPA_1000HALF))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3553) *spd = 1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3554) if (val & CAS_LPA_1000FULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3555) *fd = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3556) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3557) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3558)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3559) /* A link-up condition has occurred, initialize and enable the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3560) * rest of the chip.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3561) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3562) * Must be invoked under cp->lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3563) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3564) static void cas_set_link_modes(struct cas *cp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3565) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3566) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3567) int full_duplex, speed, pause;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3568)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3569) full_duplex = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3570) speed = 10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3571) pause = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3572)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3573) if (CAS_PHY_MII(cp->phy_type)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3574) cas_mif_poll(cp, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3575) val = cas_phy_read(cp, MII_BMCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3576) if (val & BMCR_ANENABLE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3577) cas_read_mii_link_mode(cp, &full_duplex, &speed,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3578) &pause);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3579) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3580) if (val & BMCR_FULLDPLX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3581) full_duplex = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3582)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3583) if (val & BMCR_SPEED100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3584) speed = 100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3585) else if (val & CAS_BMCR_SPEED1000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3586) speed = (cp->cas_flags & CAS_FLAG_1000MB_CAP) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3587) 1000 : 100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3588) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3589) cas_mif_poll(cp, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3590)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3591) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3592) val = readl(cp->regs + REG_PCS_MII_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3593) cas_read_pcs_link_mode(cp, &full_duplex, &speed, &pause);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3594) if ((val & PCS_MII_AUTONEG_EN) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3595) if (val & PCS_MII_CTRL_DUPLEX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3596) full_duplex = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3597) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3598) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3599)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3600) netif_info(cp, link, cp->dev, "Link up at %d Mbps, %s-duplex\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3601) speed, full_duplex ? "full" : "half");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3602)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3603) val = MAC_XIF_TX_MII_OUTPUT_EN | MAC_XIF_LINK_LED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3604) if (CAS_PHY_MII(cp->phy_type)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3605) val |= MAC_XIF_MII_BUFFER_OUTPUT_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3606) if (!full_duplex)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3607) val |= MAC_XIF_DISABLE_ECHO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3608) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3609) if (full_duplex)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3610) val |= MAC_XIF_FDPLX_LED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3611) if (speed == 1000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3612) val |= MAC_XIF_GMII_MODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3613) writel(val, cp->regs + REG_MAC_XIF_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3614)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3615) /* deal with carrier and collision detect. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3616) val = MAC_TX_CFG_IPG_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3617) if (full_duplex) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3618) val |= MAC_TX_CFG_IGNORE_CARRIER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3619) val |= MAC_TX_CFG_IGNORE_COLL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3620) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3621) #ifndef USE_CSMA_CD_PROTO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3622) val |= MAC_TX_CFG_NEVER_GIVE_UP_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3623) val |= MAC_TX_CFG_NEVER_GIVE_UP_LIM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3624) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3625) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3626) /* val now set up for REG_MAC_TX_CFG */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3627)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3628) /* If gigabit and half-duplex, enable carrier extension
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3629) * mode. increase slot time to 512 bytes as well.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3630) * else, disable it and make sure slot time is 64 bytes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3631) * also activate checksum bug workaround
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3632) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3633) if ((speed == 1000) && !full_duplex) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3634) writel(val | MAC_TX_CFG_CARRIER_EXTEND,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3635) cp->regs + REG_MAC_TX_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3636)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3637) val = readl(cp->regs + REG_MAC_RX_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3638) val &= ~MAC_RX_CFG_STRIP_FCS; /* checksum workaround */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3639) writel(val | MAC_RX_CFG_CARRIER_EXTEND,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3640) cp->regs + REG_MAC_RX_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3641)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3642) writel(0x200, cp->regs + REG_MAC_SLOT_TIME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3643)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3644) cp->crc_size = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3645) /* minimum size gigabit frame at half duplex */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3646) cp->min_frame_size = CAS_1000MB_MIN_FRAME;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3647)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3648) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3649) writel(val, cp->regs + REG_MAC_TX_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3650)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3651) /* checksum bug workaround. don't strip FCS when in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3652) * half-duplex mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3653) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3654) val = readl(cp->regs + REG_MAC_RX_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3655) if (full_duplex) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3656) val |= MAC_RX_CFG_STRIP_FCS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3657) cp->crc_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3658) cp->min_frame_size = CAS_MIN_MTU;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3659) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3660) val &= ~MAC_RX_CFG_STRIP_FCS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3661) cp->crc_size = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3662) cp->min_frame_size = CAS_MIN_FRAME;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3663) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3664) writel(val & ~MAC_RX_CFG_CARRIER_EXTEND,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3665) cp->regs + REG_MAC_RX_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3666) writel(0x40, cp->regs + REG_MAC_SLOT_TIME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3667) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3668)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3669) if (netif_msg_link(cp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3670) if (pause & 0x01) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3671) netdev_info(cp->dev, "Pause is enabled (rxfifo: %d off: %d on: %d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3672) cp->rx_fifo_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3673) cp->rx_pause_off,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3674) cp->rx_pause_on);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3675) } else if (pause & 0x10) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3676) netdev_info(cp->dev, "TX pause enabled\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3677) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3678) netdev_info(cp->dev, "Pause is disabled\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3679) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3680) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3681)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3682) val = readl(cp->regs + REG_MAC_CTRL_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3683) val &= ~(MAC_CTRL_CFG_SEND_PAUSE_EN | MAC_CTRL_CFG_RECV_PAUSE_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3684) if (pause) { /* symmetric or asymmetric pause */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3685) val |= MAC_CTRL_CFG_SEND_PAUSE_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3686) if (pause & 0x01) { /* symmetric pause */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3687) val |= MAC_CTRL_CFG_RECV_PAUSE_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3688) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3689) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3690) writel(val, cp->regs + REG_MAC_CTRL_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3691) cas_start_dma(cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3692) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3693)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3694) /* Must be invoked under cp->lock. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3695) static void cas_init_hw(struct cas *cp, int restart_link)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3696) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3697) if (restart_link)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3698) cas_phy_init(cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3699)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3700) cas_init_pause_thresholds(cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3701) cas_init_mac(cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3702) cas_init_dma(cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3703)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3704) if (restart_link) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3705) /* Default aneg parameters */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3706) cp->timer_ticks = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3707) cas_begin_auto_negotiation(cp, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3708) } else if (cp->lstate == link_up) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3709) cas_set_link_modes(cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3710) netif_carrier_on(cp->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3711) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3712) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3713)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3714) /* Must be invoked under cp->lock. on earlier cassini boards,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3715) * SOFT_0 is tied to PCI reset. we use this to force a pci reset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3716) * let it settle out, and then restore pci state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3717) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3718) static void cas_hard_reset(struct cas *cp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3719) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3720) writel(BIM_LOCAL_DEV_SOFT_0, cp->regs + REG_BIM_LOCAL_DEV_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3721) udelay(20);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3722) pci_restore_state(cp->pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3723) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3724)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3725)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3726) static void cas_global_reset(struct cas *cp, int blkflag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3727) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3728) int limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3729)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3730) /* issue a global reset. don't use RSTOUT. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3731) if (blkflag && !CAS_PHY_MII(cp->phy_type)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3732) /* For PCS, when the blkflag is set, we should set the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3733) * SW_REST_BLOCK_PCS_SLINK bit to prevent the results of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3734) * the last autonegotiation from being cleared. We'll
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3735) * need some special handling if the chip is set into a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3736) * loopback mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3737) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3738) writel((SW_RESET_TX | SW_RESET_RX | SW_RESET_BLOCK_PCS_SLINK),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3739) cp->regs + REG_SW_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3740) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3741) writel(SW_RESET_TX | SW_RESET_RX, cp->regs + REG_SW_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3742) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3743)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3744) /* need to wait at least 3ms before polling register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3745) mdelay(3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3746)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3747) limit = STOP_TRIES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3748) while (limit-- > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3749) u32 val = readl(cp->regs + REG_SW_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3750) if ((val & (SW_RESET_TX | SW_RESET_RX)) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3751) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3752) udelay(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3753) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3754) netdev_err(cp->dev, "sw reset failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3755)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3756) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3757) /* enable various BIM interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3758) writel(BIM_CFG_DPAR_INTR_ENABLE | BIM_CFG_RMA_INTR_ENABLE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3759) BIM_CFG_RTA_INTR_ENABLE, cp->regs + REG_BIM_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3760)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3761) /* clear out pci error status mask for handled errors.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3762) * we don't deal with DMA counter overflows as they happen
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3763) * all the time.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3764) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3765) writel(0xFFFFFFFFU & ~(PCI_ERR_BADACK | PCI_ERR_DTRTO |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3766) PCI_ERR_OTHER | PCI_ERR_BIM_DMA_WRITE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3767) PCI_ERR_BIM_DMA_READ), cp->regs +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3768) REG_PCI_ERR_STATUS_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3769)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3770) /* set up for MII by default to address mac rx reset timeout
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3771) * issue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3772) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3773) writel(PCS_DATAPATH_MODE_MII, cp->regs + REG_PCS_DATAPATH_MODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3774) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3775)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3776) static void cas_reset(struct cas *cp, int blkflag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3777) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3778) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3779)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3780) cas_mask_intr(cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3781) cas_global_reset(cp, blkflag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3782) cas_mac_reset(cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3783) cas_entropy_reset(cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3784)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3785) /* disable dma engines. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3786) val = readl(cp->regs + REG_TX_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3787) val &= ~TX_CFG_DMA_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3788) writel(val, cp->regs + REG_TX_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3789)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3790) val = readl(cp->regs + REG_RX_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3791) val &= ~RX_CFG_DMA_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3792) writel(val, cp->regs + REG_RX_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3794) /* program header parser */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3795) if ((cp->cas_flags & CAS_FLAG_TARGET_ABORT) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3796) (CAS_HP_ALT_FIRMWARE == cas_prog_null)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3797) cas_load_firmware(cp, CAS_HP_FIRMWARE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3798) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3799) cas_load_firmware(cp, CAS_HP_ALT_FIRMWARE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3800) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3801)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3802) /* clear out error registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3803) spin_lock(&cp->stat_lock[N_TX_RINGS]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3804) cas_clear_mac_err(cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3805) spin_unlock(&cp->stat_lock[N_TX_RINGS]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3806) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3807)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3808) /* Shut down the chip, must be called with pm_mutex held. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3809) static void cas_shutdown(struct cas *cp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3810) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3811) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3812)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3813) /* Make us not-running to avoid timers respawning */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3814) cp->hw_running = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3815)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3816) del_timer_sync(&cp->link_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3817)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3818) /* Stop the reset task */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3819) #if 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3820) while (atomic_read(&cp->reset_task_pending_mtu) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3821) atomic_read(&cp->reset_task_pending_spare) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3822) atomic_read(&cp->reset_task_pending_all))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3823) schedule();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3824)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3825) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3826) while (atomic_read(&cp->reset_task_pending))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3827) schedule();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3828) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3829) /* Actually stop the chip */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3830) cas_lock_all_save(cp, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3831) cas_reset(cp, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3832) if (cp->cas_flags & CAS_FLAG_SATURN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3833) cas_phy_powerdown(cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3834) cas_unlock_all_restore(cp, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3835) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3837) static int cas_change_mtu(struct net_device *dev, int new_mtu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3838) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3839) struct cas *cp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3841) dev->mtu = new_mtu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3842) if (!netif_running(dev) || !netif_device_present(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3843) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3844)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3845) /* let the reset task handle it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3846) #if 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3847) atomic_inc(&cp->reset_task_pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3848) if ((cp->phy_type & CAS_PHY_SERDES)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3849) atomic_inc(&cp->reset_task_pending_all);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3850) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3851) atomic_inc(&cp->reset_task_pending_mtu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3852) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3853) schedule_work(&cp->reset_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3854) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3855) atomic_set(&cp->reset_task_pending, (cp->phy_type & CAS_PHY_SERDES) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3856) CAS_RESET_ALL : CAS_RESET_MTU);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3857) pr_err("reset called in cas_change_mtu\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3858) schedule_work(&cp->reset_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3859) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3861) flush_work(&cp->reset_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3862) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3863) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3864)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3865) static void cas_clean_txd(struct cas *cp, int ring)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3866) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3867) struct cas_tx_desc *txd = cp->init_txds[ring];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3868) struct sk_buff *skb, **skbs = cp->tx_skbs[ring];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3869) u64 daddr, dlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3870) int i, size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3872) size = TX_DESC_RINGN_SIZE(ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3873) for (i = 0; i < size; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3874) int frag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3876) if (skbs[i] == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3877) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3878)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3879) skb = skbs[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3880) skbs[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3881)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3882) for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3883) int ent = i & (size - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3885) /* first buffer is never a tiny buffer and so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3886) * needs to be unmapped.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3887) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3888) daddr = le64_to_cpu(txd[ent].buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3889) dlen = CAS_VAL(TX_DESC_BUFLEN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3890) le64_to_cpu(txd[ent].control));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3891) dma_unmap_page(&cp->pdev->dev, daddr, dlen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3892) DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3894) if (frag != skb_shinfo(skb)->nr_frags) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3895) i++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3897) /* next buffer might by a tiny buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3898) * skip past it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3899) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3900) ent = i & (size - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3901) if (cp->tx_tiny_use[ring][ent].used)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3902) i++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3903) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3904) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3905) dev_kfree_skb_any(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3906) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3908) /* zero out tiny buf usage */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3909) memset(cp->tx_tiny_use[ring], 0, size*sizeof(*cp->tx_tiny_use[ring]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3910) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3912) /* freed on close */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3913) static inline void cas_free_rx_desc(struct cas *cp, int ring)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3914) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3915) cas_page_t **page = cp->rx_pages[ring];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3916) int i, size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3918) size = RX_DESC_RINGN_SIZE(ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3919) for (i = 0; i < size; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3920) if (page[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3921) cas_page_free(cp, page[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3922) page[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3923) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3924) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3925) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3926)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3927) static void cas_free_rxds(struct cas *cp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3928) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3929) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3930)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3931) for (i = 0; i < N_RX_DESC_RINGS; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3932) cas_free_rx_desc(cp, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3933) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3935) /* Must be invoked under cp->lock. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3936) static void cas_clean_rings(struct cas *cp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3937) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3938) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3939)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3940) /* need to clean all tx rings */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3941) memset(cp->tx_old, 0, sizeof(*cp->tx_old)*N_TX_RINGS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3942) memset(cp->tx_new, 0, sizeof(*cp->tx_new)*N_TX_RINGS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3943) for (i = 0; i < N_TX_RINGS; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3944) cas_clean_txd(cp, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3945)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3946) /* zero out init block */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3947) memset(cp->init_block, 0, sizeof(struct cas_init_block));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3948) cas_clean_rxds(cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3949) cas_clean_rxcs(cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3950) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3951)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3952) /* allocated on open */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3953) static inline int cas_alloc_rx_desc(struct cas *cp, int ring)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3954) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3955) cas_page_t **page = cp->rx_pages[ring];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3956) int size, i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3957)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3958) size = RX_DESC_RINGN_SIZE(ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3959) for (i = 0; i < size; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3960) if ((page[i] = cas_page_alloc(cp, GFP_KERNEL)) == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3961) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3962) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3963) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3964) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3966) static int cas_alloc_rxds(struct cas *cp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3967) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3968) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3969)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3970) for (i = 0; i < N_RX_DESC_RINGS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3971) if (cas_alloc_rx_desc(cp, i) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3972) cas_free_rxds(cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3973) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3974) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3975) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3976) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3977) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3978)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3979) static void cas_reset_task(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3980) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3981) struct cas *cp = container_of(work, struct cas, reset_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3982) #if 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3983) int pending = atomic_read(&cp->reset_task_pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3984) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3985) int pending_all = atomic_read(&cp->reset_task_pending_all);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3986) int pending_spare = atomic_read(&cp->reset_task_pending_spare);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3987) int pending_mtu = atomic_read(&cp->reset_task_pending_mtu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3988)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3989) if (pending_all == 0 && pending_spare == 0 && pending_mtu == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3990) /* We can have more tasks scheduled than actually
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3991) * needed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3992) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3993) atomic_dec(&cp->reset_task_pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3994) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3995) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3996) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3997) /* The link went down, we reset the ring, but keep
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3998) * DMA stopped. Use this function for reset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3999) * on error as well.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4000) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4001) if (cp->hw_running) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4002) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4004) /* Make sure we don't get interrupts or tx packets */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4005) netif_device_detach(cp->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4006) cas_lock_all_save(cp, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4007)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4008) if (cp->opened) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4009) /* We call cas_spare_recover when we call cas_open.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4010) * but we do not initialize the lists cas_spare_recover
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4011) * uses until cas_open is called.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4012) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4013) cas_spare_recover(cp, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4014) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4015) #if 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4016) /* test => only pending_spare set */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4017) if (!pending_all && !pending_mtu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4018) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4019) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4020) if (pending == CAS_RESET_SPARE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4021) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4022) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4023) /* when pending == CAS_RESET_ALL, the following
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4024) * call to cas_init_hw will restart auto negotiation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4025) * Setting the second argument of cas_reset to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4026) * !(pending == CAS_RESET_ALL) will set this argument
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4027) * to 1 (avoiding reinitializing the PHY for the normal
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4028) * PCS case) when auto negotiation is not restarted.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4029) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4030) #if 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4031) cas_reset(cp, !(pending_all > 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4032) if (cp->opened)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4033) cas_clean_rings(cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4034) cas_init_hw(cp, (pending_all > 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4035) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4036) cas_reset(cp, !(pending == CAS_RESET_ALL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4037) if (cp->opened)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4038) cas_clean_rings(cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4039) cas_init_hw(cp, pending == CAS_RESET_ALL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4040) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4041)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4042) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4043) cas_unlock_all_restore(cp, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4044) netif_device_attach(cp->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4045) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4046) #if 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4047) atomic_sub(pending_all, &cp->reset_task_pending_all);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4048) atomic_sub(pending_spare, &cp->reset_task_pending_spare);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4049) atomic_sub(pending_mtu, &cp->reset_task_pending_mtu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4050) atomic_dec(&cp->reset_task_pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4051) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4052) atomic_set(&cp->reset_task_pending, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4053) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4054) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4056) static void cas_link_timer(struct timer_list *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4057) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4058) struct cas *cp = from_timer(cp, t, link_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4059) int mask, pending = 0, reset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4060) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4061)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4062) if (link_transition_timeout != 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4063) cp->link_transition_jiffies_valid &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4064) ((jiffies - cp->link_transition_jiffies) >
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4065) (link_transition_timeout))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4066) /* One-second counter so link-down workaround doesn't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4067) * cause resets to occur so fast as to fool the switch
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4068) * into thinking the link is down.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4069) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4070) cp->link_transition_jiffies_valid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4071) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4072)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4073) if (!cp->hw_running)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4074) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4075)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4076) spin_lock_irqsave(&cp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4077) cas_lock_tx(cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4078) cas_entropy_gather(cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4079)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4080) /* If the link task is still pending, we just
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4081) * reschedule the link timer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4082) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4083) #if 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4084) if (atomic_read(&cp->reset_task_pending_all) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4085) atomic_read(&cp->reset_task_pending_spare) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4086) atomic_read(&cp->reset_task_pending_mtu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4087) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4088) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4089) if (atomic_read(&cp->reset_task_pending))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4090) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4091) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4093) /* check for rx cleaning */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4094) if ((mask = (cp->cas_flags & CAS_FLAG_RXD_POST_MASK))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4095) int i, rmask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4097) for (i = 0; i < MAX_RX_DESC_RINGS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4098) rmask = CAS_FLAG_RXD_POST(i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4099) if ((mask & rmask) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4100) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4102) /* post_rxds will do a mod_timer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4103) if (cas_post_rxds_ringN(cp, i, cp->rx_last[i]) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4104) pending = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4105) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4106) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4107) cp->cas_flags &= ~rmask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4108) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4109) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4111) if (CAS_PHY_MII(cp->phy_type)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4112) u16 bmsr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4113) cas_mif_poll(cp, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4114) bmsr = cas_phy_read(cp, MII_BMSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4115) /* WTZ: Solaris driver reads this twice, but that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4116) * may be due to the PCS case and the use of a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4117) * common implementation. Read it twice here to be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4118) * safe.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4119) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4120) bmsr = cas_phy_read(cp, MII_BMSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4121) cas_mif_poll(cp, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4122) readl(cp->regs + REG_MIF_STATUS); /* avoid dups */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4123) reset = cas_mii_link_check(cp, bmsr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4124) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4125) reset = cas_pcs_link_check(cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4126) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4128) if (reset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4129) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4131) /* check for tx state machine confusion */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4132) if ((readl(cp->regs + REG_MAC_TX_STATUS) & MAC_TX_FRAME_XMIT) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4133) u32 val = readl(cp->regs + REG_MAC_STATE_MACHINE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4134) u32 wptr, rptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4135) int tlm = CAS_VAL(MAC_SM_TLM, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4137) if (((tlm == 0x5) || (tlm == 0x3)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4138) (CAS_VAL(MAC_SM_ENCAP_SM, val) == 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4139) netif_printk(cp, tx_err, KERN_DEBUG, cp->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4140) "tx err: MAC_STATE[%08x]\n", val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4141) reset = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4142) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4143) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4145) val = readl(cp->regs + REG_TX_FIFO_PKT_CNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4146) wptr = readl(cp->regs + REG_TX_FIFO_WRITE_PTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4147) rptr = readl(cp->regs + REG_TX_FIFO_READ_PTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4148) if ((val == 0) && (wptr != rptr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4149) netif_printk(cp, tx_err, KERN_DEBUG, cp->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4150) "tx err: TX_FIFO[%08x:%08x:%08x]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4151) val, wptr, rptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4152) reset = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4153) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4154)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4155) if (reset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4156) cas_hard_reset(cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4157) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4159) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4160) if (reset) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4161) #if 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4162) atomic_inc(&cp->reset_task_pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4163) atomic_inc(&cp->reset_task_pending_all);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4164) schedule_work(&cp->reset_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4165) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4166) atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4167) pr_err("reset called in cas_link_timer\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4168) schedule_work(&cp->reset_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4169) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4170) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4172) if (!pending)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4173) mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4174) cas_unlock_tx(cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4175) spin_unlock_irqrestore(&cp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4176) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4178) /* tiny buffers are used to avoid target abort issues with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4179) * older cassini's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4180) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4181) static void cas_tx_tiny_free(struct cas *cp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4182) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4183) struct pci_dev *pdev = cp->pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4184) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4186) for (i = 0; i < N_TX_RINGS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4187) if (!cp->tx_tiny_bufs[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4188) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4190) dma_free_coherent(&pdev->dev, TX_TINY_BUF_BLOCK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4191) cp->tx_tiny_bufs[i], cp->tx_tiny_dvma[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4192) cp->tx_tiny_bufs[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4193) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4194) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4196) static int cas_tx_tiny_alloc(struct cas *cp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4197) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4198) struct pci_dev *pdev = cp->pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4199) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4201) for (i = 0; i < N_TX_RINGS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4202) cp->tx_tiny_bufs[i] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4203) dma_alloc_coherent(&pdev->dev, TX_TINY_BUF_BLOCK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4204) &cp->tx_tiny_dvma[i], GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4205) if (!cp->tx_tiny_bufs[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4206) cas_tx_tiny_free(cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4207) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4208) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4209) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4210) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4211) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4213)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4214) static int cas_open(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4215) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4216) struct cas *cp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4217) int hw_was_up, err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4218) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4220) mutex_lock(&cp->pm_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4222) hw_was_up = cp->hw_running;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4223)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4224) /* The power-management mutex protects the hw_running
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4225) * etc. state so it is safe to do this bit without cp->lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4226) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4227) if (!cp->hw_running) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4228) /* Reset the chip */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4229) cas_lock_all_save(cp, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4230) /* We set the second arg to cas_reset to zero
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4231) * because cas_init_hw below will have its second
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4232) * argument set to non-zero, which will force
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4233) * autonegotiation to start.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4234) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4235) cas_reset(cp, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4236) cp->hw_running = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4237) cas_unlock_all_restore(cp, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4238) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4239)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4240) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4241) if (cas_tx_tiny_alloc(cp) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4242) goto err_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4243)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4244) /* alloc rx descriptors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4245) if (cas_alloc_rxds(cp) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4246) goto err_tx_tiny;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4247)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4248) /* allocate spares */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4249) cas_spare_init(cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4250) cas_spare_recover(cp, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4251)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4252) /* We can now request the interrupt as we know it's masked
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4253) * on the controller. cassini+ has up to 4 interrupts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4254) * that can be used, but you need to do explicit pci interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4255) * mapping to expose them
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4256) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4257) if (request_irq(cp->pdev->irq, cas_interrupt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4258) IRQF_SHARED, dev->name, (void *) dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4259) netdev_err(cp->dev, "failed to request irq !\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4260) err = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4261) goto err_spare;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4262) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4263)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4264) #ifdef USE_NAPI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4265) napi_enable(&cp->napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4266) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4267) /* init hw */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4268) cas_lock_all_save(cp, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4269) cas_clean_rings(cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4270) cas_init_hw(cp, !hw_was_up);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4271) cp->opened = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4272) cas_unlock_all_restore(cp, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4274) netif_start_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4275) mutex_unlock(&cp->pm_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4276) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4277)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4278) err_spare:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4279) cas_spare_free(cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4280) cas_free_rxds(cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4281) err_tx_tiny:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4282) cas_tx_tiny_free(cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4283) err_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4284) mutex_unlock(&cp->pm_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4285) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4286) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4288) static int cas_close(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4289) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4290) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4291) struct cas *cp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4293) #ifdef USE_NAPI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4294) napi_disable(&cp->napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4295) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4296) /* Make sure we don't get distracted by suspend/resume */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4297) mutex_lock(&cp->pm_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4299) netif_stop_queue(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4301) /* Stop traffic, mark us closed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4302) cas_lock_all_save(cp, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4303) cp->opened = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4304) cas_reset(cp, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4305) cas_phy_init(cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4306) cas_begin_auto_negotiation(cp, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4307) cas_clean_rings(cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4308) cas_unlock_all_restore(cp, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4309)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4310) free_irq(cp->pdev->irq, (void *) dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4311) cas_spare_free(cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4312) cas_free_rxds(cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4313) cas_tx_tiny_free(cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4314) mutex_unlock(&cp->pm_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4315) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4316) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4317)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4318) static struct {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4319) const char name[ETH_GSTRING_LEN];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4320) } ethtool_cassini_statnames[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4321) {"collisions"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4322) {"rx_bytes"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4323) {"rx_crc_errors"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4324) {"rx_dropped"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4325) {"rx_errors"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4326) {"rx_fifo_errors"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4327) {"rx_frame_errors"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4328) {"rx_length_errors"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4329) {"rx_over_errors"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4330) {"rx_packets"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4331) {"tx_aborted_errors"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4332) {"tx_bytes"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4333) {"tx_dropped"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4334) {"tx_errors"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4335) {"tx_fifo_errors"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4336) {"tx_packets"}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4337) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4338) #define CAS_NUM_STAT_KEYS ARRAY_SIZE(ethtool_cassini_statnames)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4339)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4340) static struct {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4341) const int offsets; /* neg. values for 2nd arg to cas_read_phy */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4342) } ethtool_register_table[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4343) {-MII_BMSR},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4344) {-MII_BMCR},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4345) {REG_CAWR},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4346) {REG_INF_BURST},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4347) {REG_BIM_CFG},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4348) {REG_RX_CFG},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4349) {REG_HP_CFG},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4350) {REG_MAC_TX_CFG},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4351) {REG_MAC_RX_CFG},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4352) {REG_MAC_CTRL_CFG},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4353) {REG_MAC_XIF_CFG},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4354) {REG_MIF_CFG},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4355) {REG_PCS_CFG},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4356) {REG_SATURN_PCFG},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4357) {REG_PCS_MII_STATUS},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4358) {REG_PCS_STATE_MACHINE},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4359) {REG_MAC_COLL_EXCESS},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4360) {REG_MAC_COLL_LATE}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4361) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4362) #define CAS_REG_LEN ARRAY_SIZE(ethtool_register_table)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4363) #define CAS_MAX_REGS (sizeof (u32)*CAS_REG_LEN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4365) static void cas_read_regs(struct cas *cp, u8 *ptr, int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4366) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4367) u8 *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4368) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4369) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4371) spin_lock_irqsave(&cp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4372) for (i = 0, p = ptr; i < len ; i ++, p += sizeof(u32)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4373) u16 hval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4374) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4375) if (ethtool_register_table[i].offsets < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4376) hval = cas_phy_read(cp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4377) -ethtool_register_table[i].offsets);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4378) val = hval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4379) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4380) val= readl(cp->regs+ethtool_register_table[i].offsets);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4381) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4382) memcpy(p, (u8 *)&val, sizeof(u32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4383) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4384) spin_unlock_irqrestore(&cp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4385) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4386)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4387) static struct net_device_stats *cas_get_stats(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4388) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4389) struct cas *cp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4390) struct net_device_stats *stats = cp->net_stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4391) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4392) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4393) unsigned long tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4394)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4395) /* we collate all of the stats into net_stats[N_TX_RING] */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4396) if (!cp->hw_running)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4397) return stats + N_TX_RINGS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4398)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4399) /* collect outstanding stats */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4400) /* WTZ: the Cassini spec gives these as 16 bit counters but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4401) * stored in 32-bit words. Added a mask of 0xffff to be safe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4402) * in case the chip somehow puts any garbage in the other bits.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4403) * Also, counter usage didn't seem to mach what Adrian did
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4404) * in the parts of the code that set these quantities. Made
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4405) * that consistent.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4406) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4407) spin_lock_irqsave(&cp->stat_lock[N_TX_RINGS], flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4408) stats[N_TX_RINGS].rx_crc_errors +=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4409) readl(cp->regs + REG_MAC_FCS_ERR) & 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4410) stats[N_TX_RINGS].rx_frame_errors +=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4411) readl(cp->regs + REG_MAC_ALIGN_ERR) &0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4412) stats[N_TX_RINGS].rx_length_errors +=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4413) readl(cp->regs + REG_MAC_LEN_ERR) & 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4414) #if 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4415) tmp = (readl(cp->regs + REG_MAC_COLL_EXCESS) & 0xffff) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4416) (readl(cp->regs + REG_MAC_COLL_LATE) & 0xffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4417) stats[N_TX_RINGS].tx_aborted_errors += tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4418) stats[N_TX_RINGS].collisions +=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4419) tmp + (readl(cp->regs + REG_MAC_COLL_NORMAL) & 0xffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4420) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4421) stats[N_TX_RINGS].tx_aborted_errors +=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4422) readl(cp->regs + REG_MAC_COLL_EXCESS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4423) stats[N_TX_RINGS].collisions += readl(cp->regs + REG_MAC_COLL_EXCESS) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4424) readl(cp->regs + REG_MAC_COLL_LATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4425) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4426) cas_clear_mac_err(cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4428) /* saved bits that are unique to ring 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4429) spin_lock(&cp->stat_lock[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4430) stats[N_TX_RINGS].collisions += stats[0].collisions;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4431) stats[N_TX_RINGS].rx_over_errors += stats[0].rx_over_errors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4432) stats[N_TX_RINGS].rx_frame_errors += stats[0].rx_frame_errors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4433) stats[N_TX_RINGS].rx_fifo_errors += stats[0].rx_fifo_errors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4434) stats[N_TX_RINGS].tx_aborted_errors += stats[0].tx_aborted_errors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4435) stats[N_TX_RINGS].tx_fifo_errors += stats[0].tx_fifo_errors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4436) spin_unlock(&cp->stat_lock[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4437)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4438) for (i = 0; i < N_TX_RINGS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4439) spin_lock(&cp->stat_lock[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4440) stats[N_TX_RINGS].rx_length_errors +=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4441) stats[i].rx_length_errors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4442) stats[N_TX_RINGS].rx_crc_errors += stats[i].rx_crc_errors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4443) stats[N_TX_RINGS].rx_packets += stats[i].rx_packets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4444) stats[N_TX_RINGS].tx_packets += stats[i].tx_packets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4445) stats[N_TX_RINGS].rx_bytes += stats[i].rx_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4446) stats[N_TX_RINGS].tx_bytes += stats[i].tx_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4447) stats[N_TX_RINGS].rx_errors += stats[i].rx_errors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4448) stats[N_TX_RINGS].tx_errors += stats[i].tx_errors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4449) stats[N_TX_RINGS].rx_dropped += stats[i].rx_dropped;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4450) stats[N_TX_RINGS].tx_dropped += stats[i].tx_dropped;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4451) memset(stats + i, 0, sizeof(struct net_device_stats));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4452) spin_unlock(&cp->stat_lock[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4453) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4454) spin_unlock_irqrestore(&cp->stat_lock[N_TX_RINGS], flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4455) return stats + N_TX_RINGS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4456) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4457)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4458)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4459) static void cas_set_multicast(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4460) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4461) struct cas *cp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4462) u32 rxcfg, rxcfg_new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4463) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4464) int limit = STOP_TRIES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4465)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4466) if (!cp->hw_running)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4467) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4468)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4469) spin_lock_irqsave(&cp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4470) rxcfg = readl(cp->regs + REG_MAC_RX_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4471)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4472) /* disable RX MAC and wait for completion */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4473) writel(rxcfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4474) while (readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_EN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4475) if (!limit--)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4476) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4477) udelay(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4478) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4479)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4480) /* disable hash filter and wait for completion */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4481) limit = STOP_TRIES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4482) rxcfg &= ~(MAC_RX_CFG_PROMISC_EN | MAC_RX_CFG_HASH_FILTER_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4483) writel(rxcfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4484) while (readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_HASH_FILTER_EN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4485) if (!limit--)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4486) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4487) udelay(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4488) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4489)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4490) /* program hash filters */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4491) cp->mac_rx_cfg = rxcfg_new = cas_setup_multicast(cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4492) rxcfg |= rxcfg_new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4493) writel(rxcfg, cp->regs + REG_MAC_RX_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4494) spin_unlock_irqrestore(&cp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4495) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4496)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4497) static void cas_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4498) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4499) struct cas *cp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4500) strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4501) strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4502) strlcpy(info->bus_info, pci_name(cp->pdev), sizeof(info->bus_info));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4503) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4504)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4505) static int cas_get_link_ksettings(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4506) struct ethtool_link_ksettings *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4507) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4508) struct cas *cp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4509) u16 bmcr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4510) int full_duplex, speed, pause;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4511) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4512) enum link_state linkstate = link_up;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4513) u32 supported, advertising;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4514)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4515) advertising = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4516) supported = SUPPORTED_Autoneg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4517) if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4518) supported |= SUPPORTED_1000baseT_Full;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4519) advertising |= ADVERTISED_1000baseT_Full;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4520) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4521)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4522) /* Record PHY settings if HW is on. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4523) spin_lock_irqsave(&cp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4524) bmcr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4525) linkstate = cp->lstate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4526) if (CAS_PHY_MII(cp->phy_type)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4527) cmd->base.port = PORT_MII;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4528) cmd->base.phy_address = cp->phy_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4529) advertising |= ADVERTISED_TP | ADVERTISED_MII |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4530) ADVERTISED_10baseT_Half |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4531) ADVERTISED_10baseT_Full |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4532) ADVERTISED_100baseT_Half |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4533) ADVERTISED_100baseT_Full;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4535) supported |=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4536) (SUPPORTED_10baseT_Half |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4537) SUPPORTED_10baseT_Full |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4538) SUPPORTED_100baseT_Half |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4539) SUPPORTED_100baseT_Full |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4540) SUPPORTED_TP | SUPPORTED_MII);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4541)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4542) if (cp->hw_running) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4543) cas_mif_poll(cp, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4544) bmcr = cas_phy_read(cp, MII_BMCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4545) cas_read_mii_link_mode(cp, &full_duplex,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4546) &speed, &pause);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4547) cas_mif_poll(cp, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4548) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4549)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4550) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4551) cmd->base.port = PORT_FIBRE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4552) cmd->base.phy_address = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4553) supported |= SUPPORTED_FIBRE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4554) advertising |= ADVERTISED_FIBRE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4555)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4556) if (cp->hw_running) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4557) /* pcs uses the same bits as mii */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4558) bmcr = readl(cp->regs + REG_PCS_MII_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4559) cas_read_pcs_link_mode(cp, &full_duplex,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4560) &speed, &pause);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4561) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4562) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4563) spin_unlock_irqrestore(&cp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4564)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4565) if (bmcr & BMCR_ANENABLE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4566) advertising |= ADVERTISED_Autoneg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4567) cmd->base.autoneg = AUTONEG_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4568) cmd->base.speed = ((speed == 10) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4569) SPEED_10 :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4570) ((speed == 1000) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4571) SPEED_1000 : SPEED_100));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4572) cmd->base.duplex = full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4573) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4574) cmd->base.autoneg = AUTONEG_DISABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4575) cmd->base.speed = ((bmcr & CAS_BMCR_SPEED1000) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4576) SPEED_1000 :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4577) ((bmcr & BMCR_SPEED100) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4578) SPEED_100 : SPEED_10));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4579) cmd->base.duplex = (bmcr & BMCR_FULLDPLX) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4580) DUPLEX_FULL : DUPLEX_HALF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4581) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4582) if (linkstate != link_up) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4583) /* Force these to "unknown" if the link is not up and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4584) * autonogotiation in enabled. We can set the link
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4585) * speed to 0, but not cmd->duplex,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4586) * because its legal values are 0 and 1. Ethtool will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4587) * print the value reported in parentheses after the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4588) * word "Unknown" for unrecognized values.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4589) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4590) * If in forced mode, we report the speed and duplex
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4591) * settings that we configured.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4592) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4593) if (cp->link_cntl & BMCR_ANENABLE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4594) cmd->base.speed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4595) cmd->base.duplex = 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4596) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4597) cmd->base.speed = SPEED_10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4598) if (cp->link_cntl & BMCR_SPEED100) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4599) cmd->base.speed = SPEED_100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4600) } else if (cp->link_cntl & CAS_BMCR_SPEED1000) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4601) cmd->base.speed = SPEED_1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4602) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4603) cmd->base.duplex = (cp->link_cntl & BMCR_FULLDPLX) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4604) DUPLEX_FULL : DUPLEX_HALF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4605) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4606) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4607)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4608) ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4609) supported);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4610) ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4611) advertising);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4612)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4613) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4614) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4615)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4616) static int cas_set_link_ksettings(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4617) const struct ethtool_link_ksettings *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4618) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4619) struct cas *cp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4620) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4621) u32 speed = cmd->base.speed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4622)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4623) /* Verify the settings we care about. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4624) if (cmd->base.autoneg != AUTONEG_ENABLE &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4625) cmd->base.autoneg != AUTONEG_DISABLE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4626) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4627)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4628) if (cmd->base.autoneg == AUTONEG_DISABLE &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4629) ((speed != SPEED_1000 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4630) speed != SPEED_100 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4631) speed != SPEED_10) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4632) (cmd->base.duplex != DUPLEX_HALF &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4633) cmd->base.duplex != DUPLEX_FULL)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4634) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4635)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4636) /* Apply settings and restart link process. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4637) spin_lock_irqsave(&cp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4638) cas_begin_auto_negotiation(cp, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4639) spin_unlock_irqrestore(&cp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4640) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4641) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4642)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4643) static int cas_nway_reset(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4644) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4645) struct cas *cp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4646) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4647)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4648) if ((cp->link_cntl & BMCR_ANENABLE) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4649) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4650)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4651) /* Restart link process. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4652) spin_lock_irqsave(&cp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4653) cas_begin_auto_negotiation(cp, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4654) spin_unlock_irqrestore(&cp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4655)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4656) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4657) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4658)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4659) static u32 cas_get_link(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4660) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4661) struct cas *cp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4662) return cp->lstate == link_up;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4663) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4664)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4665) static u32 cas_get_msglevel(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4666) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4667) struct cas *cp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4668) return cp->msg_enable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4669) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4670)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4671) static void cas_set_msglevel(struct net_device *dev, u32 value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4672) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4673) struct cas *cp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4674) cp->msg_enable = value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4675) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4676)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4677) static int cas_get_regs_len(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4678) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4679) struct cas *cp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4680) return cp->casreg_len < CAS_MAX_REGS ? cp->casreg_len: CAS_MAX_REGS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4681) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4682)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4683) static void cas_get_regs(struct net_device *dev, struct ethtool_regs *regs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4684) void *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4685) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4686) struct cas *cp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4687) regs->version = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4688) /* cas_read_regs handles locks (cp->lock). */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4689) cas_read_regs(cp, p, regs->len / sizeof(u32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4690) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4691)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4692) static int cas_get_sset_count(struct net_device *dev, int sset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4693) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4694) switch (sset) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4695) case ETH_SS_STATS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4696) return CAS_NUM_STAT_KEYS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4697) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4698) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4699) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4700) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4701)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4702) static void cas_get_strings(struct net_device *dev, u32 stringset, u8 *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4703) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4704) memcpy(data, ðtool_cassini_statnames,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4705) CAS_NUM_STAT_KEYS * ETH_GSTRING_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4706) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4707)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4708) static void cas_get_ethtool_stats(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4709) struct ethtool_stats *estats, u64 *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4710) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4711) struct cas *cp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4712) struct net_device_stats *stats = cas_get_stats(cp->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4713) int i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4714) data[i++] = stats->collisions;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4715) data[i++] = stats->rx_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4716) data[i++] = stats->rx_crc_errors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4717) data[i++] = stats->rx_dropped;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4718) data[i++] = stats->rx_errors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4719) data[i++] = stats->rx_fifo_errors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4720) data[i++] = stats->rx_frame_errors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4721) data[i++] = stats->rx_length_errors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4722) data[i++] = stats->rx_over_errors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4723) data[i++] = stats->rx_packets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4724) data[i++] = stats->tx_aborted_errors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4725) data[i++] = stats->tx_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4726) data[i++] = stats->tx_dropped;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4727) data[i++] = stats->tx_errors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4728) data[i++] = stats->tx_fifo_errors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4729) data[i++] = stats->tx_packets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4730) BUG_ON(i != CAS_NUM_STAT_KEYS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4731) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4732)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4733) static const struct ethtool_ops cas_ethtool_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4734) .get_drvinfo = cas_get_drvinfo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4735) .nway_reset = cas_nway_reset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4736) .get_link = cas_get_link,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4737) .get_msglevel = cas_get_msglevel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4738) .set_msglevel = cas_set_msglevel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4739) .get_regs_len = cas_get_regs_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4740) .get_regs = cas_get_regs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4741) .get_sset_count = cas_get_sset_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4742) .get_strings = cas_get_strings,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4743) .get_ethtool_stats = cas_get_ethtool_stats,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4744) .get_link_ksettings = cas_get_link_ksettings,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4745) .set_link_ksettings = cas_set_link_ksettings,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4746) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4747)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4748) static int cas_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4749) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4750) struct cas *cp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4751) struct mii_ioctl_data *data = if_mii(ifr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4752) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4753) int rc = -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4754)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4755) /* Hold the PM mutex while doing ioctl's or we may collide
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4756) * with open/close and power management and oops.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4757) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4758) mutex_lock(&cp->pm_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4759) switch (cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4760) case SIOCGMIIPHY: /* Get address of MII PHY in use. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4761) data->phy_id = cp->phy_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4762) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4763)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4764) case SIOCGMIIREG: /* Read MII PHY register. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4765) spin_lock_irqsave(&cp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4766) cas_mif_poll(cp, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4767) data->val_out = cas_phy_read(cp, data->reg_num & 0x1f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4768) cas_mif_poll(cp, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4769) spin_unlock_irqrestore(&cp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4770) rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4771) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4772)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4773) case SIOCSMIIREG: /* Write MII PHY register. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4774) spin_lock_irqsave(&cp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4775) cas_mif_poll(cp, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4776) rc = cas_phy_write(cp, data->reg_num & 0x1f, data->val_in);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4777) cas_mif_poll(cp, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4778) spin_unlock_irqrestore(&cp->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4779) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4780) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4781) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4782) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4783)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4784) mutex_unlock(&cp->pm_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4785) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4786) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4788) /* When this chip sits underneath an Intel 31154 bridge, it is the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4789) * only subordinate device and we can tweak the bridge settings to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4790) * reflect that fact.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4791) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4792) static void cas_program_bridge(struct pci_dev *cas_pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4793) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4794) struct pci_dev *pdev = cas_pdev->bus->self;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4795) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4797) if (!pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4798) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4800) if (pdev->vendor != 0x8086 || pdev->device != 0x537c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4801) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4802)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4803) /* Clear bit 10 (Bus Parking Control) in the Secondary
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4804) * Arbiter Control/Status Register which lives at offset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4805) * 0x41. Using a 32-bit word read/modify/write at 0x40
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4806) * is much simpler so that's how we do this.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4807) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4808) pci_read_config_dword(pdev, 0x40, &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4809) val &= ~0x00040000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4810) pci_write_config_dword(pdev, 0x40, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4812) /* Max out the Multi-Transaction Timer settings since
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4813) * Cassini is the only device present.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4814) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4815) * The register is 16-bit and lives at 0x50. When the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4816) * settings are enabled, it extends the GRANT# signal
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4817) * for a requestor after a transaction is complete. This
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4818) * allows the next request to run without first needing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4819) * to negotiate the GRANT# signal back.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4820) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4821) * Bits 12:10 define the grant duration:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4822) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4823) * 1 -- 16 clocks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4824) * 2 -- 32 clocks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4825) * 3 -- 64 clocks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4826) * 4 -- 128 clocks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4827) * 5 -- 256 clocks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4828) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4829) * All other values are illegal.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4830) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4831) * Bits 09:00 define which REQ/GNT signal pairs get the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4832) * GRANT# signal treatment. We set them all.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4833) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4834) pci_write_config_word(pdev, 0x50, (5 << 10) | 0x3ff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4835)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4836) /* The Read Prefecth Policy register is 16-bit and sits at
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4837) * offset 0x52. It enables a "smart" pre-fetch policy. We
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4838) * enable it and max out all of the settings since only one
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4839) * device is sitting underneath and thus bandwidth sharing is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4840) * not an issue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4841) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4842) * The register has several 3 bit fields, which indicates a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4843) * multiplier applied to the base amount of prefetching the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4844) * chip would do. These fields are at:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4845) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4846) * 15:13 --- ReRead Primary Bus
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4847) * 12:10 --- FirstRead Primary Bus
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4848) * 09:07 --- ReRead Secondary Bus
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4849) * 06:04 --- FirstRead Secondary Bus
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4850) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4851) * Bits 03:00 control which REQ/GNT pairs the prefetch settings
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4852) * get enabled on. Bit 3 is a grouped enabler which controls
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4853) * all of the REQ/GNT pairs from [8:3]. Bits 2 to 0 control
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4854) * the individual REQ/GNT pairs [2:0].
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4855) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4856) pci_write_config_word(pdev, 0x52,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4857) (0x7 << 13) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4858) (0x7 << 10) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4859) (0x7 << 7) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4860) (0x7 << 4) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4861) (0xf << 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4863) /* Force cacheline size to 0x8 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4864) pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x08);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4866) /* Force latency timer to maximum setting so Cassini can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4867) * sit on the bus as long as it likes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4868) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4869) pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4870) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4872) static const struct net_device_ops cas_netdev_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4873) .ndo_open = cas_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4874) .ndo_stop = cas_close,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4875) .ndo_start_xmit = cas_start_xmit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4876) .ndo_get_stats = cas_get_stats,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4877) .ndo_set_rx_mode = cas_set_multicast,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4878) .ndo_do_ioctl = cas_ioctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4879) .ndo_tx_timeout = cas_tx_timeout,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4880) .ndo_change_mtu = cas_change_mtu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4881) .ndo_set_mac_address = eth_mac_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4882) .ndo_validate_addr = eth_validate_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4883) #ifdef CONFIG_NET_POLL_CONTROLLER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4884) .ndo_poll_controller = cas_netpoll,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4885) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4886) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4888) static int cas_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4889) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4890) static int cas_version_printed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4891) unsigned long casreg_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4892) struct net_device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4893) struct cas *cp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4894) int i, err, pci_using_dac;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4895) u16 pci_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4896) u8 orig_cacheline_size = 0, cas_cacheline_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4898) if (cas_version_printed++ == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4899) pr_info("%s", version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4901) err = pci_enable_device(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4902) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4903) dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4904) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4905) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4907) if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4908) dev_err(&pdev->dev, "Cannot find proper PCI device "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4909) "base address, aborting\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4910) err = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4911) goto err_out_disable_pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4912) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4914) dev = alloc_etherdev(sizeof(*cp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4915) if (!dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4916) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4917) goto err_out_disable_pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4918) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4919) SET_NETDEV_DEV(dev, &pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4921) err = pci_request_regions(pdev, dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4922) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4923) dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4924) goto err_out_free_netdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4925) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4926) pci_set_master(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4928) /* we must always turn on parity response or else parity
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4929) * doesn't get generated properly. disable SERR/PERR as well.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4930) * in addition, we want to turn MWI on.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4931) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4932) pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4933) pci_cmd &= ~PCI_COMMAND_SERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4934) pci_cmd |= PCI_COMMAND_PARITY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4935) pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4936) if (pci_try_set_mwi(pdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4937) pr_warn("Could not enable MWI for %s\n", pci_name(pdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4939) cas_program_bridge(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4941) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4942) * On some architectures, the default cache line size set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4943) * by pci_try_set_mwi reduces perforamnce. We have to increase
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4944) * it for this case. To start, we'll print some configuration
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4945) * data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4946) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4947) #if 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4948) pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4949) &orig_cacheline_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4950) if (orig_cacheline_size < CAS_PREF_CACHELINE_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4951) cas_cacheline_size =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4952) (CAS_PREF_CACHELINE_SIZE < SMP_CACHE_BYTES) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4953) CAS_PREF_CACHELINE_SIZE : SMP_CACHE_BYTES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4954) if (pci_write_config_byte(pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4955) PCI_CACHE_LINE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4956) cas_cacheline_size)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4957) dev_err(&pdev->dev, "Could not set PCI cache "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4958) "line size\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4959) goto err_out_free_res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4960) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4961) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4962) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4963)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4965) /* Configure DMA attributes. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4966) if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4967) pci_using_dac = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4968) err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4969) if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4970) dev_err(&pdev->dev, "Unable to obtain 64-bit DMA "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4971) "for consistent allocations\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4972) goto err_out_free_res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4973) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4974)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4975) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4976) err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4977) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4978) dev_err(&pdev->dev, "No usable DMA configuration, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4979) "aborting\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4980) goto err_out_free_res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4981) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4982) pci_using_dac = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4983) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4985) casreg_len = pci_resource_len(pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4987) cp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4988) cp->pdev = pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4989) #if 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4990) /* A value of 0 indicates we never explicitly set it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4991) cp->orig_cacheline_size = cas_cacheline_size ? orig_cacheline_size: 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4992) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4993) cp->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4994) cp->msg_enable = (cassini_debug < 0) ? CAS_DEF_MSG_ENABLE :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4995) cassini_debug;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4997) #if defined(CONFIG_SPARC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4998) cp->of_node = pci_device_to_OF_node(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4999) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5001) cp->link_transition = LINK_TRANSITION_UNKNOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5002) cp->link_transition_jiffies_valid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5004) spin_lock_init(&cp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5005) spin_lock_init(&cp->rx_inuse_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5006) spin_lock_init(&cp->rx_spare_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5007) for (i = 0; i < N_TX_RINGS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5008) spin_lock_init(&cp->stat_lock[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5009) spin_lock_init(&cp->tx_lock[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5010) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5011) spin_lock_init(&cp->stat_lock[N_TX_RINGS]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5012) mutex_init(&cp->pm_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5013)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5014) timer_setup(&cp->link_timer, cas_link_timer, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5015)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5016) #if 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5017) /* Just in case the implementation of atomic operations
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5018) * change so that an explicit initialization is necessary.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5019) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5020) atomic_set(&cp->reset_task_pending, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5021) atomic_set(&cp->reset_task_pending_all, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5022) atomic_set(&cp->reset_task_pending_spare, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5023) atomic_set(&cp->reset_task_pending_mtu, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5024) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5025) INIT_WORK(&cp->reset_task, cas_reset_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5027) /* Default link parameters */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5028) if (link_mode >= 0 && link_mode < 6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5029) cp->link_cntl = link_modes[link_mode];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5030) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5031) cp->link_cntl = BMCR_ANENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5032) cp->lstate = link_down;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5033) cp->link_transition = LINK_TRANSITION_LINK_DOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5034) netif_carrier_off(cp->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5035) cp->timer_ticks = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5036)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5037) /* give us access to cassini registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5038) cp->regs = pci_iomap(pdev, 0, casreg_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5039) if (!cp->regs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5040) dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5041) goto err_out_free_res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5042) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5043) cp->casreg_len = casreg_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5045) pci_save_state(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5046) cas_check_pci_invariants(cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5047) cas_hard_reset(cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5048) cas_reset(cp, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5049) if (cas_check_invariants(cp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5050) goto err_out_iounmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5051) if (cp->cas_flags & CAS_FLAG_SATURN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5052) cas_saturn_firmware_init(cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5053)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5054) cp->init_block =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5055) dma_alloc_coherent(&pdev->dev, sizeof(struct cas_init_block),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5056) &cp->block_dvma, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5057) if (!cp->init_block) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5058) dev_err(&pdev->dev, "Cannot allocate init block, aborting\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5059) goto err_out_iounmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5060) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5061)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5062) for (i = 0; i < N_TX_RINGS; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5063) cp->init_txds[i] = cp->init_block->txds[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5065) for (i = 0; i < N_RX_DESC_RINGS; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5066) cp->init_rxds[i] = cp->init_block->rxds[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5067)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5068) for (i = 0; i < N_RX_COMP_RINGS; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5069) cp->init_rxcs[i] = cp->init_block->rxcs[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5070)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5071) for (i = 0; i < N_RX_FLOWS; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5072) skb_queue_head_init(&cp->rx_flows[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5073)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5074) dev->netdev_ops = &cas_netdev_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5075) dev->ethtool_ops = &cas_ethtool_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5076) dev->watchdog_timeo = CAS_TX_TIMEOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5077)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5078) #ifdef USE_NAPI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5079) netif_napi_add(dev, &cp->napi, cas_poll, 64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5080) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5081) dev->irq = pdev->irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5082) dev->dma = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5083)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5084) /* Cassini features. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5085) if ((cp->cas_flags & CAS_FLAG_NO_HW_CSUM) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5086) dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5088) if (pci_using_dac)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5089) dev->features |= NETIF_F_HIGHDMA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5090)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5091) /* MTU range: 60 - varies or 9000 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5092) dev->min_mtu = CAS_MIN_MTU;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5093) dev->max_mtu = CAS_MAX_MTU;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5094)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5095) if (register_netdev(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5096) dev_err(&pdev->dev, "Cannot register net device, aborting\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5097) goto err_out_free_consistent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5098) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5099)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5100) i = readl(cp->regs + REG_BIM_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5101) netdev_info(dev, "Sun Cassini%s (%sbit/%sMHz PCI/%s) Ethernet[%d] %pM\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5102) (cp->cas_flags & CAS_FLAG_REG_PLUS) ? "+" : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5103) (i & BIM_CFG_32BIT) ? "32" : "64",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5104) (i & BIM_CFG_66MHZ) ? "66" : "33",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5105) (cp->phy_type == CAS_PHY_SERDES) ? "Fi" : "Cu", pdev->irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5106) dev->dev_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5108) pci_set_drvdata(pdev, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5109) cp->hw_running = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5110) cas_entropy_reset(cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5111) cas_phy_init(cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5112) cas_begin_auto_negotiation(cp, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5113) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5114)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5115) err_out_free_consistent:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5116) dma_free_coherent(&pdev->dev, sizeof(struct cas_init_block),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5117) cp->init_block, cp->block_dvma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5119) err_out_iounmap:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5120) mutex_lock(&cp->pm_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5121) if (cp->hw_running)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5122) cas_shutdown(cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5123) mutex_unlock(&cp->pm_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5125) pci_iounmap(pdev, cp->regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5126)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5128) err_out_free_res:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5129) pci_release_regions(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5131) /* Try to restore it in case the error occurred after we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5132) * set it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5133) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5134) pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, orig_cacheline_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5136) err_out_free_netdev:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5137) free_netdev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5139) err_out_disable_pdev:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5140) pci_disable_device(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5141) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5142) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5144) static void cas_remove_one(struct pci_dev *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5145) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5146) struct net_device *dev = pci_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5147) struct cas *cp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5148) if (!dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5149) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5150)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5151) cp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5152) unregister_netdev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5153)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5154) vfree(cp->fw_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5156) mutex_lock(&cp->pm_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5157) cancel_work_sync(&cp->reset_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5158) if (cp->hw_running)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5159) cas_shutdown(cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5160) mutex_unlock(&cp->pm_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5162) #if 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5163) if (cp->orig_cacheline_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5164) /* Restore the cache line size if we had modified
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5165) * it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5166) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5167) pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5168) cp->orig_cacheline_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5169) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5170) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5171) dma_free_coherent(&pdev->dev, sizeof(struct cas_init_block),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5172) cp->init_block, cp->block_dvma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5173) pci_iounmap(pdev, cp->regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5174) free_netdev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5175) pci_release_regions(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5176) pci_disable_device(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5177) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5179) static int __maybe_unused cas_suspend(struct device *dev_d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5180) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5181) struct net_device *dev = dev_get_drvdata(dev_d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5182) struct cas *cp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5183) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5185) mutex_lock(&cp->pm_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5187) /* If the driver is opened, we stop the DMA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5188) if (cp->opened) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5189) netif_device_detach(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5190)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5191) cas_lock_all_save(cp, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5193) /* We can set the second arg of cas_reset to 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5194) * because on resume, we'll call cas_init_hw with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5195) * its second arg set so that autonegotiation is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5196) * restarted.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5197) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5198) cas_reset(cp, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5199) cas_clean_rings(cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5200) cas_unlock_all_restore(cp, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5201) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5203) if (cp->hw_running)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5204) cas_shutdown(cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5205) mutex_unlock(&cp->pm_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5207) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5208) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5210) static int __maybe_unused cas_resume(struct device *dev_d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5211) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5212) struct net_device *dev = dev_get_drvdata(dev_d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5213) struct cas *cp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5214)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5215) netdev_info(dev, "resuming\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5217) mutex_lock(&cp->pm_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5218) cas_hard_reset(cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5219) if (cp->opened) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5220) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5221) cas_lock_all_save(cp, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5222) cas_reset(cp, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5223) cp->hw_running = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5224) cas_clean_rings(cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5225) cas_init_hw(cp, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5226) cas_unlock_all_restore(cp, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5228) netif_device_attach(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5229) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5230) mutex_unlock(&cp->pm_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5231) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5232) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5233)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5234) static SIMPLE_DEV_PM_OPS(cas_pm_ops, cas_suspend, cas_resume);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5235)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5236) static struct pci_driver cas_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5237) .name = DRV_MODULE_NAME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5238) .id_table = cas_pci_tbl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5239) .probe = cas_init_one,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5240) .remove = cas_remove_one,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5241) .driver.pm = &cas_pm_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5242) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5243)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5244) static int __init cas_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5245) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5246) if (linkdown_timeout > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5247) link_transition_timeout = linkdown_timeout * HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5248) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5249) link_transition_timeout = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5251) return pci_register_driver(&cas_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5252) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5254) static void __exit cas_cleanup(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5255) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5256) pci_unregister_driver(&cas_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5257) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5259) module_init(cas_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5260) module_exit(cas_cleanup);