/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2016 Broadcom Corporation.
 * Copyright (C) 2016-2017 Broadcom Limited.
 * Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
 * refers to Broadcom Inc. and/or its subsidiaries.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2016 Broadcom Corporation.
 *	Copyright (C) 2016-2017 Broadcom Ltd.
 *	Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
 *	refers to Broadcom Inc. and/or its subsidiaries.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/crc32poly.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#include <uapi/linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>

#define BAR_0	0
#define BAR_2	2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)				\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)				\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)			\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
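
/* Illustrative sketch (not part of the driver logic): each wrapper above
 * expands to a bitop on tp->tg3_flags, so
 *
 *	if (tg3_flag(tp, ENABLE_APE))
 *		tg3_flag_set(tp, ENABLE_ASF);
 *
 * is shorthand for
 *
 *	if (_tg3_flag(TG3_FLAG_ENABLE_APE, tp->tg3_flags))
 *		_tg3_flag_set(TG3_FLAG_ENABLE_ASF, tp->tg3_flags);
 */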

#define DRV_MODULE_NAME		"tg3"
/* DO NOT UPDATE TG3_*_NUM defines */
#define TG3_MAJ_NUM		3
#define TG3_MIN_NUM		137

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY	100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			ETH_ZLEN
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100

/* Do not place this n-ring entries value into the tp struct itself;
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions. Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
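
/* Worked example (illustrative): because TG3_TX_RING_SIZE is a power of
 * two, the mask above is equivalent to '% TG3_TX_RING_SIZE' without a
 * divide:
 *
 *	NEXT_TX(510) == 511
 *	NEXT_TX(511) == 0	(512 & 511 wraps to the ring start)
 */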

#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode. The driver
 * works around this bug by double copying the packet. This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient. For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path. Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)
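/* Worked example (illustrative): assuming the default tx_pending of
 * TG3_DEF_TX_RING_PENDING (511), the queue is woken once at least
 * 511 / 4 = 127 descriptors are free again.
 */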
#define TG3_TX_BD_DMA_MAX_2K		2048
#define TG3_TX_BD_DMA_MAX_4K		4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_MAX_UCAST_ADDR(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 3)
#define TG3_UCAST_ADDR_IDX(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 1)

#define TG3_FW_UPDATE_TIMEOUT_SEC	5
#define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG357766	"tigon/tg357766.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

#define TG3_DRV_DATA_FLAG_10_100_ONLY	0x0001
#define TG3_DRV_DATA_FLAG_5705_10_100	0x0002

static const struct pci_device_id tg3_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
			PCI_VENDOR_ID_LENOVO,
			TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)
#define TG3_NVRAM_TEST		0
#define TG3_LINK_TEST		1
#define TG3_REGISTER_TEST	2
#define TG3_MEMORY_TEST		3
#define TG3_MAC_LOOPB_TEST	4
#define TG3_PHY_LOOPB_TEST	5
#define TG3_EXT_LOOPB_TEST	6
#define TG3_INTERRUPT_TEST	7


static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	[TG3_NVRAM_TEST]	= { "nvram test (online) " },
	[TG3_LINK_TEST]		= { "link test (online) " },
	[TG3_REGISTER_TEST]	= { "register test (offline)" },
	[TG3_MEMORY_TEST]	= { "memory test (offline)" },
	[TG3_MAC_LOOPB_TEST]	= { "mac loopback test (offline)" },
	[TG3_PHY_LOOPB_TEST]	= { "phy loopback test (offline)" },
	[TG3_EXT_LOOPB_TEST]	= { "ext loopback test (offline)" },
	[TG3_INTERRUPT_TEST]	= { "interrupt test (offline)" },
};

#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)


static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

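/* Write a register and read it straight back so the posted PCI write is
 * flushed to the device before execution continues.
 */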
static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example, when the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another, when the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
	    (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
	     !tg3_flag(tp, ICH_WORKAROUND)))
		tp->read32_mbox(tp, off);
}

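/* Post a TX producer index to a send mailbox. Chips with the
 * TXD_MBOX_HWBUG erratum need the value written twice, and chipsets
 * prone to reordering posted writes need a read back to flush the
 * update to the device.
 */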
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
	    tg3_flag(tp, FLUSH_POSTED_WRITES))
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)
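
/* Usage sketch (illustrative): a GPIO power-switch update through
 * GRC_LOCAL_CTRL is one case where the settle time matters:
 *
 *	tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
 *		    TG3_GRC_LCLCTL_PWRSW_DELAY);
 *
 * This posts the write, reads it back, and enforces the 100 usec delay
 * described above.
 */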

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
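
/* Usage sketch (illustrative): NIC SRAM words are accessed through the
 * memory window, e.g. checking the firmware signature word:
 *
 *	u32 val;
 *
 *	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
 *	if (val == NIC_SRAM_DATA_SIG_MAGIC)
 *		... firmware configuration block is valid ...
 */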

static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase, bit;

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver doesn't hold any stale locks. */
	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
		switch (i) {
		case TG3_APE_LOCK_PHY0:
		case TG3_APE_LOCK_PHY1:
		case TG3_APE_LOCK_PHY2:
		case TG3_APE_LOCK_PHY3:
			bit = APE_LOCK_GRANT_DRIVER;
			break;
		default:
			if (!tp->pci_fn)
				bit = APE_LOCK_GRANT_DRIVER;
			else
				bit = 1 << tp->pci_fn;
		}
		tg3_ape_write32(tp, regbase + 4 * i, bit);
	}
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return 0;
		fallthrough;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_REQ_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_REQ_DRIVER;
		break;
	default:
		return -EINVAL;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767)
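/* Release an APE lock by writing the owner bit back to the grant
 * register.  The bit selection mirrors tg3_ape_lock().
 */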
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) static void tg3_ape_unlock(struct tg3 *tp, int locknum)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) u32 gnt, bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) if (!tg3_flag(tp, ENABLE_APE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) switch (locknum) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) case TG3_APE_LOCK_GPIO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) if (tg3_asic_rev(tp) == ASIC_REV_5761)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) case TG3_APE_LOCK_GRC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) case TG3_APE_LOCK_MEM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) if (!tp->pci_fn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) bit = APE_LOCK_GRANT_DRIVER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) bit = 1 << tp->pci_fn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) case TG3_APE_LOCK_PHY0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) case TG3_APE_LOCK_PHY1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) case TG3_APE_LOCK_PHY2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) case TG3_APE_LOCK_PHY3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) bit = APE_LOCK_GRANT_DRIVER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) if (tg3_asic_rev(tp) == ASIC_REV_5761)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) gnt = TG3_APE_LOCK_GRANT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) gnt = TG3_APE_PER_LOCK_GRANT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) tg3_ape_write32(tp, gnt + 4 * locknum, bit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804)
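/* Wait up to timeout_us for the APE to finish servicing the previous
 * event.  On success the TG3_APE_LOCK_MEM lock is left held for the
 * caller; on timeout the lock is dropped and -EBUSY is returned.
 */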
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) u32 apedata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) while (timeout_us) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) udelay(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) return timeout_us ? 0 : -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) #ifdef CONFIG_TIGON3_HWMON
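/* Poll (without taking the memory lock) until the APE clears the
 * event-pending bit.  Returns zero once the event has been serviced,
 * nonzero on timeout.
 */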
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) u32 i, apedata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) for (i = 0; i < timeout_us / 10; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) udelay(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) return i == timeout_us / 10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842)
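/* Read len bytes of APE scratchpad memory, starting at base_off, into
 * data.  Transfers go through the shared message buffer in chunks of
 * at most the advertised message-buffer length: each chunk posts a
 * scratchpad-read driver event and then copies the reply out of the
 * message area.  Only meaningful on NCSI-capable APE firmware.
 */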
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) u32 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) u32 i, bufoff, msgoff, maxlen, apedata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) if (!tg3_flag(tp, APE_HAS_NCSI))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) if (apedata != APE_SEG_SIG_MAGIC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) if (!(apedata & APE_FW_STATUS_READY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) TG3_APE_SHMEM_BASE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) msgoff = bufoff + 2 * sizeof(u32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) while (len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) u32 length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867)
		/* Cap transfer sizes to the scratchpad limits. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) length = (len > maxlen) ? maxlen : len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) len -= length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) if (!(apedata & APE_FW_STATUS_READY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) /* Wait for up to 1 msec for APE to service previous event. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) err = tg3_ape_event_lock(tp, 1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) apedata = APE_EVENT_STATUS_DRIVER_EVNT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) APE_EVENT_STATUS_SCRTCHPD_READ |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) APE_EVENT_STATUS_EVENT_PENDING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) tg3_ape_write32(tp, bufoff, base_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) tg3_ape_write32(tp, bufoff + sizeof(u32), length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) base_off += length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) if (tg3_ape_wait_for_event(tp, 30000))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) for (i = 0; length; i += 4, length -= 4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) u32 val = tg3_ape_read32(tp, msgoff + i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) memcpy(data, &val, sizeof(u32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) data++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907)
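/* Post a driver event to the APE firmware: wait for any previous
 * event to be serviced, latch the new event with the pending bit
 * set, then signal the APE through the TG3_APE_EVENT register.
 */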
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) static int tg3_ape_send_event(struct tg3 *tp, u32 event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) u32 apedata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) if (apedata != APE_SEG_SIG_MAGIC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) if (!(apedata & APE_FW_STATUS_READY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920)
	/* Wait for up to 20 milliseconds for APE to service previous event. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) err = tg3_ape_event_lock(tp, 20000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) event | APE_EVENT_STATUS_EVENT_PENDING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934)
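/* Tell the APE firmware about driver state transitions.  On init,
 * (re)populate the host shared-memory segment and report a START
 * state; on shutdown, report either WOL or UNLOAD depending on the
 * wake-up configuration.  A state-change event is then posted.
 */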
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) u32 event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) u32 apedata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) if (!tg3_flag(tp, ENABLE_APE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) switch (kind) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) case RESET_KIND_INIT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) APE_HOST_SEG_SIG_MAGIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) APE_HOST_SEG_LEN_MAGIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) APE_HOST_BEHAV_NO_PHYLOCK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) TG3_APE_HOST_DRVR_STATE_START);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) event = APE_EVENT_STATUS_STATE_START;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) case RESET_KIND_SHUTDOWN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) if (device_may_wakeup(&tp->pdev->dev) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) tg3_flag(tp, WOL_ENABLE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) TG3_APE_HOST_WOL_SPEED_AUTO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) apedata = TG3_APE_HOST_DRVR_STATE_WOL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) event = APE_EVENT_STATUS_STATE_UNLOAD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) tg3_ape_send_event(tp, event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) static void tg3_send_ape_heartbeat(struct tg3 *tp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) unsigned long interval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) {
	/* Check if the heartbeat interval has elapsed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) if (!tg3_flag(tp, ENABLE_APE) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) time_before(jiffies, tp->ape_hb_jiffies + interval))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) tp->ape_hb_jiffies = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994)
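/* Disable chip interrupts: mask the PCI interrupt in the misc host
 * control register and write a nonzero value to every vector's
 * interrupt mailbox, which keeps that vector's interrupts blocked.
 */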
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) static void tg3_disable_ints(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) tw32(TG3PCI_MISC_HOST_CTRL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) for (i = 0; i < tp->irq_max; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004)
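/* Re-enable chip interrupts: unmask the PCI interrupt and write each
 * vector's last status tag to its interrupt mailbox (written twice
 * when 1SHOT_MSI is in effect).  If a status-block update is already
 * pending, an initial interrupt is forced.
 */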
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) static void tg3_enable_ints(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) tp->irq_sync = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) tw32(TG3PCI_MISC_HOST_CTRL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) for (i = 0; i < tp->irq_cnt; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) struct tg3_napi *tnapi = &tp->napi[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) if (tg3_flag(tp, 1SHOT_MSI))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) tp->coal_now |= tnapi->coal_now;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) /* Force an initial interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) if (!tg3_flag(tp, TAGGED_STATUS) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) tw32(HOSTCC_MODE, tp->coal_now);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035)
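/* Return nonzero if the status block indicates work for this NAPI
 * context: a link-change event, completed TX descriptors, or newly
 * arrived RX descriptors.
 */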
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) struct tg3 *tp = tnapi->tp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) struct tg3_hw_status *sblk = tnapi->hw_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) unsigned int work_exists = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) /* check for phy events */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) if (sblk->status & SD_STATUS_LINK_CHG)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) work_exists = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) /* check for TX work to do */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) work_exists = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) /* check for RX work to do */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) if (tnapi->rx_rcb_prod_idx &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) work_exists = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) return work_exists;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059)
/* tg3_int_reenable()
 * Similar to tg3_enable_ints(), but it accurately determines whether
 * there is new work pending and can return without flushing the PIO
 * write that re-enables interrupts.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) static void tg3_int_reenable(struct tg3_napi *tnapi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) struct tg3 *tp = tnapi->tp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) /* When doing tagged status, this work check is unnecessary.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) * The last_tag we write above tells the chip which piece of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) * work we've completed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) tw32(HOSTCC_MODE, tp->coalesce_mode |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) HOSTCC_MODE_ENABLE | tnapi->coal_now);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079)
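/* Step the core clock selection in TG3PCI_CLOCK_CTRL back to the
 * default, going through the intermediate ALTCLK setting where the
 * current configuration requires it.  Not needed (and skipped) on
 * CPMU-equipped and 5780-class devices.
 */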
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) static void tg3_switch_clocks(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) u32 clock_ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) u32 orig_clock_ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) orig_clock_ctrl = clock_ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) CLOCK_CTRL_CLKRUN_OENABLE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 0x1f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) tp->pci_clock_ctrl = clock_ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) if (tg3_flag(tp, 5705_PLUS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) tw32_wait_f(TG3PCI_CLOCK_CTRL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) clock_ctrl | CLOCK_CTRL_625_CORE, 40);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) tw32_wait_f(TG3PCI_CLOCK_CTRL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) clock_ctrl |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 40);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) tw32_wait_f(TG3PCI_CLOCK_CTRL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) clock_ctrl | (CLOCK_CTRL_ALTCLK),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 40);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) #define PHY_BUSY_LOOPS 5000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114)
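/* Read a PHY register over the MII management interface.  MI
 * autopolling is paused while the transaction runs, and the PHY APE
 * lock serializes access against the APE firmware.  Returns -EBUSY
 * if the MI interface stays busy for PHY_BUSY_LOOPS iterations.
 */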
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) u32 *val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) u32 frame_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) unsigned int loops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) tw32_f(MAC_MI_MODE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) udelay(80);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) tg3_ape_lock(tp, tp->phy_ape_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) *val = 0x0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) frame_val = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) MI_COM_PHY_ADDR_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) MI_COM_REG_ADDR_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) frame_val |= (MI_COM_CMD_READ | MI_COM_START);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) tw32_f(MAC_MI_COM, frame_val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) loops = PHY_BUSY_LOOPS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) while (loops != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) udelay(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) frame_val = tr32(MAC_MI_COM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) if ((frame_val & MI_COM_BUSY) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) udelay(5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) frame_val = tr32(MAC_MI_COM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) loops -= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) ret = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) if (loops != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) *val = frame_val & MI_COM_DATA_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) tw32_f(MAC_MI_MODE, tp->mi_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) udelay(80);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) tg3_ape_unlock(tp, tp->phy_ape_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) return __tg3_readphy(tp, tp->phy_addr, reg, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173)
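/* Write a PHY register over the MII management interface, with the
 * same autopoll/APE-lock handling as __tg3_readphy().  Writes to
 * MII_CTRL1000 and MII_TG3_AUX_CTRL are silently ignored on FET
 * PHYs.
 */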
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) u32 val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) u32 frame_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) unsigned int loops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) tw32_f(MAC_MI_MODE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) udelay(80);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) tg3_ape_lock(tp, tp->phy_ape_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) frame_val = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) MI_COM_PHY_ADDR_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) MI_COM_REG_ADDR_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) frame_val |= (val & MI_COM_DATA_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) tw32_f(MAC_MI_COM, frame_val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) loops = PHY_BUSY_LOOPS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) while (loops != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) udelay(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) frame_val = tr32(MAC_MI_COM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) if ((frame_val & MI_COM_BUSY) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) udelay(5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) frame_val = tr32(MAC_MI_COM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) loops -= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) ret = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) if (loops != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) tw32_f(MAC_MI_MODE, tp->mi_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) udelay(80);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) tg3_ape_unlock(tp, tp->phy_ape_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) return __tg3_writephy(tp, tp->phy_addr, reg, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232)
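/* Clause-45 MMD register access via the clause-22 indirection
 * registers: select the MMD device and register address, switch the
 * control register to no-post-increment data mode, then transfer the
 * data word.  tg3_phy_cl45_read() below follows the same sequence.
 */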
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) MII_TG3_MMD_CTRL_DATA_NOINC | devad);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) MII_TG3_MMD_CTRL_DATA_NOINC | devad);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278)
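/* DSP register access: latch the register through MII_TG3_DSP_ADDRESS,
 * then read or write the value via MII_TG3_DSP_RW_PORT.
 */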
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) MII_TG3_AUXCTL_SHDWSEL_MISC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) set |= MII_TG3_AUXCTL_MISC_WREN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) if (enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336)
	err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
				   val | MII_TG3_AUXCTL_ACTL_TX_6DB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) return tg3_writephy(tp, MII_TG3_MISC_SHDW,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) reg | val | MII_TG3_MISC_SHDW_WREN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) static int tg3_bmcr_reset(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) u32 phy_control;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) int limit, err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) /* OK, reset it, and poll the BMCR_RESET bit until it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) * clears or we time out.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) phy_control = BMCR_RESET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) err = tg3_writephy(tp, MII_BMCR, phy_control);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) if (err != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) limit = 5000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) while (limit--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) err = tg3_readphy(tp, MII_BMCR, &phy_control);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) if (err != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) if ((phy_control & BMCR_RESET) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) udelay(40);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) udelay(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) if (limit < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379)
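/* mii_bus accessors: tg3_mdio_read() and tg3_mdio_write() wrap
 * __tg3_readphy()/__tg3_writephy() in tp->lock so phylib callers are
 * serialized against the rest of the driver.
 */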
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) struct tg3 *tp = bp->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) spin_lock_bh(&tp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) if (__tg3_readphy(tp, mii_id, reg, &val))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) val = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) spin_unlock_bh(&tp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) return val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) struct tg3 *tp = bp->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) u32 ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) spin_lock_bh(&tp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) if (__tg3_writephy(tp, mii_id, reg, val))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) ret = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) spin_unlock_bh(&tp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409)
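/* Match the 5785 MAC's PHY configuration (LED modes, RGMII in-band
 * status and clock-timeout settings) to the type of PHY found on the
 * MDIO bus.
 */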
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) static void tg3_mdio_config_5785(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) struct phy_device *phydev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) case PHY_ID_BCM50610:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) case PHY_ID_BCM50610M:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) val = MAC_PHYCFG2_50610_LED_MODES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) case PHY_ID_BCMAC131:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) val = MAC_PHYCFG2_AC131_LED_MODES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) case PHY_ID_RTL8211C:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) val = MAC_PHYCFG2_RTL8211C_LED_MODES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) case PHY_ID_RTL8201E:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) val = MAC_PHYCFG2_RTL8201E_LED_MODES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) tw32(MAC_PHYCFG2, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) val = tr32(MAC_PHYCFG1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) val &= ~(MAC_PHYCFG1_RGMII_INT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) tw32(MAC_PHYCFG1, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) val |= MAC_PHYCFG2_EMODE_MASK_MASK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) MAC_PHYCFG2_FMODE_MASK_MASK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) MAC_PHYCFG2_GMODE_MASK_MASK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) MAC_PHYCFG2_ACT_MASK_MASK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) MAC_PHYCFG2_QUAL_MASK_MASK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) MAC_PHYCFG2_INBAND_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) tw32(MAC_PHYCFG2, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) val = tr32(MAC_PHYCFG1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) tw32(MAC_PHYCFG1, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) val = tr32(MAC_EXT_RGMII_MODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) val &= ~(MAC_RGMII_MODE_RX_INT_B |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) MAC_RGMII_MODE_RX_QUALITY |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) MAC_RGMII_MODE_RX_ACTIVITY |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) MAC_RGMII_MODE_RX_ENG_DET |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) MAC_RGMII_MODE_TX_ENABLE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) MAC_RGMII_MODE_TX_LOWPWR |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) MAC_RGMII_MODE_TX_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) val |= MAC_RGMII_MODE_RX_INT_B |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) MAC_RGMII_MODE_RX_QUALITY |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) MAC_RGMII_MODE_RX_ACTIVITY |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) MAC_RGMII_MODE_RX_ENG_DET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) val |= MAC_RGMII_MODE_TX_ENABLE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) MAC_RGMII_MODE_TX_LOWPWR |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) MAC_RGMII_MODE_TX_RESET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) tw32(MAC_EXT_RGMII_MODE, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) static void tg3_mdio_start(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) tw32_f(MAC_MI_MODE, tp->mi_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) udelay(80);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) if (tg3_flag(tp, MDIOBUS_INITED) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) tg3_asic_rev(tp) == ASIC_REV_5785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) tg3_mdio_config_5785(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501)
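/* Determine the PHY address (function-dependent on 5717-class parts,
 * queried from the switch on SSB/roboswitch devices, TG3_PHY_MII_ADDR
 * otherwise) and, when phylib is in use, register an MDIO bus exposing
 * only that address.  PHY-specific interface modes and dev_flags are
 * applied to the probed device.
 */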
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) static int tg3_mdio_init(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) u32 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) struct phy_device *phydev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) if (tg3_flag(tp, 5717_PLUS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) u32 is_serdes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) tp->phy_addr = tp->pci_fn + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) TG3_CPMU_PHY_STRAP_IS_SERDES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) if (is_serdes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) tp->phy_addr += 7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) } else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) int addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) addr = ssb_gige_get_phyaddr(tp->pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) if (addr < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) return addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) tp->phy_addr = addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) tp->phy_addr = TG3_PHY_MII_ADDR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) tg3_mdio_start(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) tp->mdio_bus = mdiobus_alloc();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) if (tp->mdio_bus == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) tp->mdio_bus->name = "tg3 mdio bus";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) (tp->pdev->bus->number << 8) | tp->pdev->devfn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) tp->mdio_bus->priv = tp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) tp->mdio_bus->parent = &tp->pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) tp->mdio_bus->read = &tg3_mdio_read;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) tp->mdio_bus->write = &tg3_mdio_write;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) /* The bus registration will look for all the PHYs on the mdio bus.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) * Unfortunately, it does not ensure the PHY is powered up before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) * accessing the PHY ID registers. A chip reset is the
	 * quickest way to bring the device back to an operational state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) tg3_bmcr_reset(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) i = mdiobus_register(tp->mdio_bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) if (i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) mdiobus_free(tp->mdio_bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) return i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) if (!phydev || !phydev->drv) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) dev_warn(&tp->pdev->dev, "No PHY devices\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) mdiobus_unregister(tp->mdio_bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) mdiobus_free(tp->mdio_bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) case PHY_ID_BCM57780:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) phydev->interface = PHY_INTERFACE_MODE_GMII;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) case PHY_ID_BCM50610:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) case PHY_ID_BCM50610M:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) PHY_BRCM_RX_REFCLK_UNUSED |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) PHY_BRCM_DIS_TXCRXC_NOENRGY |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) PHY_BRCM_AUTO_PWRDWN_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) if (tg3_flag(tp, RGMII_INBAND_DISABLE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) case PHY_ID_RTL8211C:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) phydev->interface = PHY_INTERFACE_MODE_RGMII;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) case PHY_ID_RTL8201E:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) case PHY_ID_BCMAC131:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) phydev->interface = PHY_INTERFACE_MODE_MII;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) tp->phy_flags |= TG3_PHYFLG_IS_FET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) tg3_flag_set(tp, MDIOBUS_INITED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) if (tg3_asic_rev(tp) == ASIC_REV_5785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) tg3_mdio_config_5785(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) static void tg3_mdio_fini(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) if (tg3_flag(tp, MDIOBUS_INITED)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) tg3_flag_clear(tp, MDIOBUS_INITED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) mdiobus_unregister(tp->mdio_bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) mdiobus_free(tp->mdio_bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) /* tp->lock is held. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) static inline void tg3_generate_fw_event(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) val = tr32(GRC_RX_CPU_EVENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) val |= GRC_RX_CPU_DRIVER_EVENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) tw32_f(GRC_RX_CPU_EVENT, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626)
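/* Note when the event was posted so tg3_wait_for_event_ack() can
 * bound its wait.
 */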
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) tp->last_event_jiffies = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) #define TG3_FW_EVENT_TIMEOUT_USEC 2500
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) /* tp->lock is held. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) static void tg3_wait_for_event_ack(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) unsigned int delay_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) long time_remain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) /* If enough time has passed, no wait is necessary. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) time_remain = (long)(tp->last_event_jiffies + 1 +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) (long)jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) if (time_remain < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) /* Check if we can shorten the wait time. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) delay_cnt = jiffies_to_usecs(time_remain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) delay_cnt = (delay_cnt >> 3) + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651)
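/* delay_cnt is now the number of 8 usec polling steps below; the +1
 * guarantees at least one pass through the loop.
 */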
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) for (i = 0; i < delay_cnt; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) if (pci_channel_offline(tp->pdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) udelay(8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) /* tp->lock is held. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) u32 reg, val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666)
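/* Pack pairs of 16-bit MII registers into four 32-bit mailbox words:
 * (BMCR, BMSR), (ADVERTISE, LPA), (CTRL1000, STAT1000), and PHYADDR.
 */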
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) val = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) if (!tg3_readphy(tp, MII_BMCR, &reg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) val = reg << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) if (!tg3_readphy(tp, MII_BMSR, &reg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) val |= (reg & 0xffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) *data++ = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) val = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) val = reg << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) if (!tg3_readphy(tp, MII_LPA, &reg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) val |= (reg & 0xffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) *data++ = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) val = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) if (!tg3_readphy(tp, MII_CTRL1000, &reg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) val = reg << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) if (!tg3_readphy(tp, MII_STAT1000, &reg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) val |= (reg & 0xffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) *data++ = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) if (!tg3_readphy(tp, MII_PHYADDR, &reg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) val = reg << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) val = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) *data++ = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) /* tp->lock is held. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) static void tg3_ump_link_report(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) u32 data[4];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) tg3_phy_gather_ump_data(tp, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) tg3_wait_for_event_ack(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708)
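/* Post a LINK_UPDATE command: command word, payload length, then the
 * four data words gathered above.
 */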
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) tg3_generate_fw_event(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) /* tp->lock is held. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) static void tg3_stop_fw(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) /* Wait for RX cpu to ACK the previous event. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) tg3_wait_for_event_ack(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) tg3_generate_fw_event(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) /* Wait for RX cpu to ACK this event. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) tg3_wait_for_event_ack(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) /* tp->lock is held. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) {
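/* Post the pre-reset magic value.  The boot firmware acknowledges a
 * completed reset by writing back its one's complement, which
 * tg3_poll_fw() waits for.
 */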
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) switch (kind) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) case RESET_KIND_INIT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) DRV_STATE_START);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) case RESET_KIND_SHUTDOWN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) DRV_STATE_UNLOAD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) case RESET_KIND_SUSPEND:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) DRV_STATE_SUSPEND);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) /* tp->lock is held. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) switch (kind) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) case RESET_KIND_INIT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) DRV_STATE_START_DONE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) case RESET_KIND_SHUTDOWN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) DRV_STATE_UNLOAD_DONE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) /* tp->lock is held. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) if (tg3_flag(tp, ENABLE_ASF)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) switch (kind) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) case RESET_KIND_INIT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) DRV_STATE_START);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) case RESET_KIND_SHUTDOWN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) DRV_STATE_UNLOAD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) case RESET_KIND_SUSPEND:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) DRV_STATE_SUSPEND);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) static int tg3_poll_fw(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) if (tg3_flag(tp, NO_FWARE_REPORTED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) if (tg3_flag(tp, IS_SSB_CORE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) /* We don't use firmware. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) if (tg3_asic_rev(tp) == ASIC_REV_5906) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) /* Wait up to 20ms for init done. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) for (i = 0; i < 200; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) if (pci_channel_offline(tp->pdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) udelay(100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) /* Wait for firmware initialization to complete. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) for (i = 0; i < 100000; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) if (pci_channel_offline(tp->pdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) tg3_flag_set(tp, NO_FWARE_REPORTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) netdev_info(tp->dev, "No firmware running\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) udelay(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) /* Chip might not be fitted with firmware. Some Sun onboard
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) * parts are configured like that. So don't signal the timeout
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) * of the above loop as an error, but do report the lack of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) * running firmware once.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) tg3_flag_set(tp, NO_FWARE_REPORTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) netdev_info(tp->dev, "No firmware running\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) /* The 57765 A0 needs a little more
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) * time to do some important work.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) mdelay(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) static void tg3_link_report(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) if (!netif_carrier_ok(tp->dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) netif_info(tp, link, tp->dev, "Link is down\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) tg3_ump_link_report(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) } else if (netif_msg_link(tp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) (tp->link_config.active_speed == SPEED_1000 ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) 1000 :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) (tp->link_config.active_speed == SPEED_100 ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) 100 : 10)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) (tp->link_config.active_duplex == DUPLEX_FULL ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) "full" : "half"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) "on" : "off",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) "on" : "off");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) netdev_info(tp->dev, "EEE is %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) tp->setlpicnt ? "enabled" : "disabled");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) tg3_ump_link_report(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) tp->link_up = netif_carrier_ok(tp->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904)
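/* Decode a copper PAUSE advertisement: PAUSE_CAP alone means symmetric
 * flow control, PAUSE_CAP with PAUSE_ASYM means receive-only, and
 * PAUSE_ASYM alone means transmit-only.
 */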
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) static u32 tg3_decode_flowctrl_1000T(u32 adv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) u32 flowctrl = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) if (adv & ADVERTISE_PAUSE_CAP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) flowctrl |= FLOW_CTRL_RX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) if (!(adv & ADVERTISE_PAUSE_ASYM))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) flowctrl |= FLOW_CTRL_TX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) } else if (adv & ADVERTISE_PAUSE_ASYM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) flowctrl |= FLOW_CTRL_TX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) return flowctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) u16 miireg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) miireg = ADVERTISE_1000XPAUSE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) else if (flow_ctrl & FLOW_CTRL_TX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) miireg = ADVERTISE_1000XPSE_ASYM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) else if (flow_ctrl & FLOW_CTRL_RX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) miireg = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) return miireg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) static u32 tg3_decode_flowctrl_1000X(u32 adv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) u32 flowctrl = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) if (adv & ADVERTISE_1000XPAUSE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) flowctrl |= FLOW_CTRL_RX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) if (!(adv & ADVERTISE_1000XPSE_ASYM))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) flowctrl |= FLOW_CTRL_TX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) } else if (adv & ADVERTISE_1000XPSE_ASYM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) flowctrl |= FLOW_CTRL_TX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) return flowctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948)
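/* Resolve 1000BASE-X pause autonegotiation: symmetric pause when both
 * sides advertise it, otherwise an asymmetric-pause match enables flow
 * control in the one direction both ends agree on.
 */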
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) u8 cap = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) if (lcladv & ADVERTISE_1000XPAUSE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) cap = FLOW_CTRL_RX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) if (rmtadv & ADVERTISE_1000XPAUSE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) cap = FLOW_CTRL_TX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) return cap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) u8 autoneg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) u8 flowctrl = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) u32 old_rx_mode = tp->rx_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) u32 old_tx_mode = tp->tx_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) if (tg3_flag(tp, USE_PHYLIB))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) autoneg = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)->autoneg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) autoneg = tp->link_config.autoneg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) flowctrl = tp->link_config.flowctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) tp->link_config.active_flowctrl = flowctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986)
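/* Apply the resolved flow control to the MAC, touching the RX/TX mode
 * registers only when their values actually change.
 */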
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) if (flowctrl & FLOW_CTRL_RX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) if (old_rx_mode != tp->rx_mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) tw32_f(MAC_RX_MODE, tp->rx_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) if (flowctrl & FLOW_CTRL_TX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) if (old_tx_mode != tp->tx_mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) tw32_f(MAC_TX_MODE, tp->tx_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) static void tg3_adjust_link(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) u8 oldflowctrl, linkmesg = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) u32 mac_mode, lcl_adv, rmt_adv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) struct tg3 *tp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) struct phy_device *phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) spin_lock_bh(&tp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) MAC_MODE_HALF_DUPLEX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) oldflowctrl = tp->link_config.active_flowctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) if (phydev->link) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) lcl_adv = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) rmt_adv = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) mac_mode |= MAC_MODE_PORT_MODE_MII;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) else if (phydev->speed == SPEED_1000 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) tg3_asic_rev(tp) != ASIC_REV_5785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) mac_mode |= MAC_MODE_PORT_MODE_GMII;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) mac_mode |= MAC_MODE_PORT_MODE_MII;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) if (phydev->duplex == DUPLEX_HALF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) mac_mode |= MAC_MODE_HALF_DUPLEX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) lcl_adv = mii_advertise_flowctrl(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) tp->link_config.flowctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) if (phydev->pause)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) rmt_adv = LPA_PAUSE_CAP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) if (phydev->asym_pause)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) rmt_adv |= LPA_PAUSE_ASYM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) mac_mode |= MAC_MODE_PORT_MODE_GMII;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) if (mac_mode != tp->mac_mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) tp->mac_mode = mac_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) tw32_f(MAC_MODE, tp->mac_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) udelay(40);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) if (tg3_asic_rev(tp) == ASIC_REV_5785) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) if (phydev->speed == SPEED_10)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) tw32(MAC_MI_STAT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) MAC_MI_STAT_10MBPS_MODE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060)
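/* 1000 Mbps half duplex needs the maximum slot time for carrier
 * extension; every other mode uses the standard slot time.
 */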
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) tw32(MAC_TX_LENGTHS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) (6 << TX_LENGTHS_IPG_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) tw32(MAC_TX_LENGTHS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) (6 << TX_LENGTHS_IPG_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) if (phydev->link != tp->old_link ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) phydev->speed != tp->link_config.active_speed ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) phydev->duplex != tp->link_config.active_duplex ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) oldflowctrl != tp->link_config.active_flowctrl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) linkmesg = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) tp->old_link = phydev->link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) tp->link_config.active_speed = phydev->speed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) tp->link_config.active_duplex = phydev->duplex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) spin_unlock_bh(&tp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) if (linkmesg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) tg3_link_report(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) static int tg3_phy_init(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) struct phy_device *phydev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) /* Bring the PHY back to a known state. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) tg3_bmcr_reset(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) /* Attach the MAC to the PHY. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) phydev = phy_connect(tp->dev, phydev_name(phydev),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) tg3_adjust_link, phydev->interface);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) if (IS_ERR(phydev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) return PTR_ERR(phydev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) /* Mask with MAC supported features. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) switch (phydev->interface) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) case PHY_INTERFACE_MODE_GMII:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) case PHY_INTERFACE_MODE_RGMII:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) phy_set_max_speed(phydev, SPEED_1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) phy_support_asym_pause(phydev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) case PHY_INTERFACE_MODE_MII:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) phy_set_max_speed(phydev, SPEED_100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) phy_support_asym_pause(phydev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) phy_attached_info(phydev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) static void tg3_phy_start(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) struct phy_device *phydev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) phydev->speed = tp->link_config.speed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) phydev->duplex = tp->link_config.duplex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) phydev->autoneg = tp->link_config.autoneg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) ethtool_convert_legacy_u32_to_link_mode(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) phydev->advertising, tp->link_config.advertising);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) phy_start(phydev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) phy_start_aneg(phydev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) static void tg3_phy_stop(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) phy_stop(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) static void tg3_phy_fini(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) static int tg3_phy_set_extloopbk(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) if (tp->phy_flags & TG3_PHYFLG_IS_FET)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) /* Cannot do read-modify-write on 5401 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) err = tg3_phy_auxctl_write(tp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) 0x4c20);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) err = tg3_phy_auxctl_read(tp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) err = tg3_phy_auxctl_write(tp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) u32 phytest;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) u32 phy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) tg3_writephy(tp, MII_TG3_FET_TEST,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) phytest | MII_TG3_FET_SHADOW_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) if (enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) u32 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) if (!tg3_flag(tp, 5705_PLUS) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) (tg3_flag(tp, 5717_PLUS) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) tg3_phy_fet_toggle_apd(tp, enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236)
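/* On non-FET PHYs, auto power-down is programmed through the SCR5 and
 * APD shadow registers; an 84 ms wake timer is used when APD is on.
 */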
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) reg = MII_TG3_MISC_SHDW_SCR5_LPED |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) MII_TG3_MISC_SHDW_SCR5_DLPTLM |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) MII_TG3_MISC_SHDW_SCR5_SDTL |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) MII_TG3_MISC_SHDW_SCR5_C125OE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) if (enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) u32 phy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) if (!tg3_flag(tp, 5705_PLUS) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) u32 ephy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) tg3_writephy(tp, MII_TG3_FET_TEST,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) ephy | MII_TG3_FET_SHADOW_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) if (!tg3_readphy(tp, reg, &phy)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) if (enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) tg3_writephy(tp, reg, phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) ret = tg3_phy_auxctl_read(tp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) if (!ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) if (enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) tg3_phy_auxctl_write(tp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) static void tg3_phy_set_wirespeed(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) static void tg3_phy_apply_otp(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) u32 otp, phy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) if (!tp->phy_otp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) otp = tp->phy_otp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) if (tg3_phy_toggle_auxctl_smdsp(tp, true))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320)
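/* Unpack the per-device analog trim fields from the OTP word and load
 * each into its matching PHY DSP register.
 */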
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) tg3_phy_toggle_auxctl_smdsp(tp, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) struct ethtool_eee *dest = &tp->eee;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) if (eee)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) dest = eee;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) /* Pull eee_active */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) val == TG3_CL45_D7_EEERES_STAT_LP_100TX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) dest->eee_active = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) dest->eee_active = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) /* Pull lp advertised settings */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) /* Pull advertised and eee_enabled settings */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) dest->eee_enabled = !!val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) /* Pull tx_lpi_enabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) val = tr32(TG3_CPMU_EEE_MODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) /* Pull lpi timer value */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385)
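/* Re-evaluate EEE after a (re)negotiated link.  On a full-duplex
 * 100/1000 autoneg link the LPI exit timer is programmed and
 * tp->setlpicnt is armed so LPI can be enabled once the link has
 * settled; in every other case LPI is torn down immediately.
 */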
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) tp->setlpicnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) if (tp->link_config.autoneg == AUTONEG_ENABLE &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) current_link_up &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) tp->link_config.active_duplex == DUPLEX_FULL &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) (tp->link_config.active_speed == SPEED_100 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) tp->link_config.active_speed == SPEED_1000)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) u32 eeectl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) if (tp->link_config.active_speed == SPEED_1000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) tw32(TG3_CPMU_EEE_CTRL, eeectl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) tg3_eee_pull_config(tp, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) if (tp->eee.eee_active)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) tp->setlpicnt = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) if (!tp->setlpicnt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) if (current_link_up &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) tg3_phy_toggle_auxctl_smdsp(tp, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) val = tr32(TG3_CPMU_EEE_MODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425)
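/* Enable LPI signalling.  On 5717/5719 and 57765-class chips running
 * at 1000 Mbps this also applies the TAP26 ALNOKO/RMRXSTO DSP fixup
 * before setting the CPMU LPI enable bit.
 */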
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) static void tg3_phy_eee_enable(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) if (tp->link_config.active_speed == SPEED_1000 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) (tg3_asic_rev(tp) == ASIC_REV_5717 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) tg3_asic_rev(tp) == ASIC_REV_5719 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) tg3_flag(tp, 57765_CLASS)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) val = MII_TG3_DSP_TAP26_ALNOKO |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) MII_TG3_DSP_TAP26_RMRXSTO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) tg3_phy_toggle_auxctl_smdsp(tp, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) val = tr32(TG3_CPMU_EEE_MODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444)
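/* Poll MII_TG3_DSP_CONTROL until the macro-busy bit (0x1000) clears,
 * giving up with -EBUSY after 100 reads.
 */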
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) static int tg3_wait_macro_done(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) int limit = 100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) while (limit--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) u32 tmp32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) if ((tmp32 & 0x1000) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) if (limit < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462)
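/* Write a known test pattern into each of the four DSP channels and
 * verify that it reads back intact.  A macro timeout sets *resetp so
 * the caller retries after a fresh PHY reset; a plain readback
 * mismatch just returns -EBUSY.
 */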
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) static const u32 test_pat[4][6] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) int chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) for (chan = 0; chan < 4; chan++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) (chan * 0x2000) | 0x0200);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) for (i = 0; i < 6; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) test_pat[chan][i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) if (tg3_wait_macro_done(tp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) *resetp = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) (chan * 0x2000) | 0x0200);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) if (tg3_wait_macro_done(tp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) *resetp = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) if (tg3_wait_macro_done(tp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) *resetp = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) for (i = 0; i < 6; i += 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) u32 low, high;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) tg3_wait_macro_done(tp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) *resetp = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) low &= 0x7fff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) high &= 0x000f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) if (low != test_pat[chan][i] ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) high != test_pat[chan][i+1]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528)
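/* Clear the test pattern back out of all four DSP channels. */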
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) static int tg3_phy_reset_chanpat(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) int chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) for (chan = 0; chan < 4; chan++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) (chan * 0x2000) | 0x0200);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) for (i = 0; i < 6; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) if (tg3_wait_macro_done(tp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548)
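/* PHY reset workaround for the 5703, 5704 and 5705: force a 1000BASE-T
 * full-duplex master link, run the DSP test pattern until it verifies
 * (up to ten attempts, resetting the PHY whenever the test asks for
 * it), then restore the original MII_CTRL1000 and extended-control
 * register settings.
 */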
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) u32 reg32, phy9_orig;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) int retries, do_phy_reset, err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) retries = 10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) do_phy_reset = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) if (do_phy_reset) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) err = tg3_bmcr_reset(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) do_phy_reset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) /* Disable transmitter and interrupt. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) reg32 |= 0x3000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) /* Set full-duplex, 1000 Mbps. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) tg3_writephy(tp, MII_BMCR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) BMCR_FULLDPLX | BMCR_SPEED1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) /* Set to master mode. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) tg3_writephy(tp, MII_CTRL1000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) err = tg3_phy_toggle_auxctl_smdsp(tp, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) /* Block the PHY control access. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) tg3_phydsp_write(tp, 0x8005, 0x0800);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) } while (--retries);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) err = tg3_phy_reset_chanpat(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) tg3_phydsp_write(tp, 0x8005, 0x0000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) tg3_phy_toggle_auxctl_smdsp(tp, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) tg3_writephy(tp, MII_CTRL1000, phy9_orig);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) err = tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) reg32 &= ~0x3000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) static void tg3_carrier_off(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) netif_carrier_off(tp->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) tp->link_up = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) if (tg3_flag(tp, ENABLE_ASF))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) netdev_warn(tp->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) "Management side-band traffic will be interrupted during phy settings change\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) /* This will reset the tigon3 PHY and reapply all of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631)  * chip- and revision-specific workarounds that must follow
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632)  * a PHY reset.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) static int tg3_phy_reset(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) u32 val, cpmuctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) if (tg3_asic_rev(tp) == ASIC_REV_5906) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) val = tr32(GRC_MISC_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) udelay(40);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) }
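/* The link-status bit in BMSR is latched; read the register twice
 * to fetch the current state.
 */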
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) err = tg3_readphy(tp, MII_BMSR, &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) err |= tg3_readphy(tp, MII_BMSR, &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) if (err != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) if (netif_running(tp->dev) && tp->link_up) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) netif_carrier_off(tp->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) tg3_link_report(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) tg3_asic_rev(tp) == ASIC_REV_5704 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) tg3_asic_rev(tp) == ASIC_REV_5705) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) err = tg3_phy_reset_5703_4_5(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) cpmuctrl = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) tg3_chip_rev(tp) != CHIPREV_5784_AX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) cpmuctrl = tr32(TG3_CPMU_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) tw32(TG3_CPMU_CTRL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) err = tg3_bmcr_reset(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) tw32(TG3_CPMU_CTRL, cpmuctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) tg3_chip_rev(tp) == CHIPREV_5761_AX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) CPMU_LSPD_1000MB_MACCLK_12_5) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) udelay(40);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) if (tg3_flag(tp, 5717_PLUS) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) tg3_phy_apply_otp(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) tg3_phy_toggle_apd(tp, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) tg3_phy_toggle_apd(tp, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) tg3_phydsp_write(tp, 0x201f, 0x2aaa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) tg3_phydsp_write(tp, 0x000a, 0x0323);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) tg3_phy_toggle_auxctl_smdsp(tp, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) tg3_phydsp_write(tp, 0x000a, 0x310b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) tg3_phydsp_write(tp, 0x201f, 0x9506);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) tg3_phydsp_write(tp, 0x401f, 0x14e2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) tg3_phy_toggle_auxctl_smdsp(tp, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) tg3_writephy(tp, MII_TG3_TEST1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) MII_TG3_TEST1_TRIM_EN | 0x4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) tg3_phy_toggle_auxctl_smdsp(tp, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) /* Set Extended packet length bit (bit 14) on all chips that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739)  * support jumbo frames.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) /* Cannot do read-modify-write on 5401 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) /* Set bit 14 with read-modify-write to preserve other bits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) err = tg3_phy_auxctl_read(tp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) /* Set phy register 0x10 bit 0 to high fifo elasticity to support
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753)  * jumbo frame transmission.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) if (tg3_flag(tp, JUMBO_CAPABLE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) tg3_writephy(tp, MII_TG3_EXT_CTRL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) if (tg3_asic_rev(tp) == ASIC_REV_5906) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) /* adjust output voltage */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) tg3_phydsp_write(tp, 0xffb, 0x4000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) tg3_phy_toggle_automdix(tp, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) tg3_phy_set_wirespeed(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773)
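/* Each PCI function owns a four-bit nibble in a register shared by all
 * functions: bit 0 of the nibble advertises that a driver is present,
 * bit 1 that the function needs Vaux.
 */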
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) #define TG3_GPIO_MSG_DRVR_PRES 0x00000001
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) #define TG3_GPIO_MSG_NEED_VAUX 0x00000002
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) #define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) TG3_GPIO_MSG_NEED_VAUX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) (TG3_GPIO_MSG_DRVR_PRES << 4) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) (TG3_GPIO_MSG_DRVR_PRES << 8) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) (TG3_GPIO_MSG_DRVR_PRES << 12))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) (TG3_GPIO_MSG_NEED_VAUX << 4) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) (TG3_GPIO_MSG_NEED_VAUX << 8) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) (TG3_GPIO_MSG_NEED_VAUX << 12))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789)
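/* Replace this function's nibble in the shared GPIO message register
 * (TG3_APE_GPIO_MSG on 5717/5719, TG3_CPMU_DRV_STATUS elsewhere) with
 * @newstat and return the resulting status of all functions.
 */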
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) u32 status, shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) tg3_asic_rev(tp) == ASIC_REV_5719)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) status = tr32(TG3_CPMU_DRV_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) status &= ~(TG3_GPIO_MSG_MASK << shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) status |= (newstat << shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) tg3_asic_rev(tp) == ASIC_REV_5719)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) tw32(TG3_CPMU_DRV_STATUS, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) return status >> TG3_APE_GPIO_MSG_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812)
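/* Switch the power source back to Vmain.  On 5717/5719/5720 the GPIO
 * APE lock serializes the transition against the other functions, and
 * the driver-present nibble is republished first.
 */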
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) if (!tg3_flag(tp, IS_NIC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) tg3_asic_rev(tp) == ASIC_REV_5719 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) tg3_asic_rev(tp) == ASIC_REV_5720) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) TG3_GRC_LCLCTL_PWRSW_DELAY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) TG3_GRC_LCLCTL_PWRSW_DELAY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837)
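/* Pulse GPIO1 so that the board's power switch settles on Vmain.
 * Not applicable to LOM (non-NIC) devices or to the 5700/5701.
 */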
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) u32 grc_local_ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) if (!tg3_flag(tp, IS_NIC) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) tg3_asic_rev(tp) == ASIC_REV_5700 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) tg3_asic_rev(tp) == ASIC_REV_5701)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) tw32_wait_f(GRC_LOCAL_CTRL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) TG3_GRC_LCLCTL_PWRSW_DELAY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) tw32_wait_f(GRC_LOCAL_CTRL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) grc_local_ctrl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) TG3_GRC_LCLCTL_PWRSW_DELAY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) tw32_wait_f(GRC_LOCAL_CTRL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) TG3_GRC_LCLCTL_PWRSW_DELAY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861)
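/* Switch the power source over to Vaux.  The GPIO sequence is board
 * specific: 5700/5701 assert everything in one shot, 5761 non-e parts
 * have GPIO0 and GPIO2 swapped, and the 5714 raises GPIO3 first to
 * avoid overdrawing current.
 */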
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) if (!tg3_flag(tp, IS_NIC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) tg3_asic_rev(tp) == ASIC_REV_5701) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) (GRC_LCLCTRL_GPIO_OE0 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) GRC_LCLCTRL_GPIO_OE1 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) GRC_LCLCTRL_GPIO_OE2 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) GRC_LCLCTRL_GPIO_OUTPUT0 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) GRC_LCLCTRL_GPIO_OUTPUT1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) TG3_GRC_LCLCTL_PWRSW_DELAY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) GRC_LCLCTRL_GPIO_OE1 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) GRC_LCLCTRL_GPIO_OE2 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) GRC_LCLCTRL_GPIO_OUTPUT0 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) GRC_LCLCTRL_GPIO_OUTPUT1 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) tp->grc_local_ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) TG3_GRC_LCLCTL_PWRSW_DELAY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) TG3_GRC_LCLCTL_PWRSW_DELAY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) TG3_GRC_LCLCTL_PWRSW_DELAY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) u32 no_gpio2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) u32 grc_local_ctrl = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) /* Workaround to prevent overdrawing Amps. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) if (tg3_asic_rev(tp) == ASIC_REV_5714) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) grc_local_ctrl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) TG3_GRC_LCLCTL_PWRSW_DELAY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) /* On 5753 and variants, GPIO2 cannot be used. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) no_gpio2 = tp->nic_sram_data_cfg &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) NIC_SRAM_DATA_CFG_NO_GPIO2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912) GRC_LCLCTRL_GPIO_OE1 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) GRC_LCLCTRL_GPIO_OE2 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) GRC_LCLCTRL_GPIO_OUTPUT1 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) GRC_LCLCTRL_GPIO_OUTPUT2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) if (no_gpio2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) GRC_LCLCTRL_GPIO_OUTPUT2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) tw32_wait_f(GRC_LOCAL_CTRL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) tp->grc_local_ctrl | grc_local_ctrl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922) TG3_GRC_LCLCTL_PWRSW_DELAY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) tw32_wait_f(GRC_LOCAL_CTRL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927) tp->grc_local_ctrl | grc_local_ctrl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928) TG3_GRC_LCLCTL_PWRSW_DELAY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) if (!no_gpio2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) tw32_wait_f(GRC_LOCAL_CTRL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) tp->grc_local_ctrl | grc_local_ctrl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) TG3_GRC_LCLCTL_PWRSW_DELAY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938)
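/* 5717-class version of the aux power logic: publish this function's
 * Vaux request in the shared status register, and only touch the power
 * source once no other function still has a driver present.
 */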
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) u32 msg = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) /* Serialize power state transitions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947) if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) msg = TG3_GPIO_MSG_NEED_VAUX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950) msg = tg3_set_function_status(tp, msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) tg3_pwrsrc_switch_to_vaux(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958) tg3_pwrsrc_die_with_vmain(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963)
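/* Decide whether this device (and the peer function on two-port
 * boards) must stay on Vaux for WoL or ASF, then switch the power
 * source accordingly.
 */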
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964) static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) bool need_vaux = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) /* The GPIOs do something completely different on 57765. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) tg3_asic_rev(tp) == ASIC_REV_5719 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) tg3_asic_rev(tp) == ASIC_REV_5720) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975) tg3_frob_aux_power_5717(tp, include_wol ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) tg3_flag(tp, WOL_ENABLE) != 0 : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980) if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981) struct net_device *dev_peer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) dev_peer = pci_get_drvdata(tp->pdev_peer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) /* remove_one() may have been run on the peer. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) if (dev_peer) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987) struct tg3 *tp_peer = netdev_priv(dev_peer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989) if (tg3_flag(tp_peer, INIT_COMPLETE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992) if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993) tg3_flag(tp_peer, ENABLE_ASF))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994) need_vaux = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998) if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999) tg3_flag(tp, ENABLE_ASF))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000) need_vaux = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002) if (need_vaux)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003) tg3_pwrsrc_switch_to_vaux(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005) tg3_pwrsrc_die_with_vmain(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007)
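/* Report whether the 5700-era MAC needs its link polarity inverted
 * for the given speed with the fitted PHY and LED mode.
 */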
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008) static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010) if (tp->led_ctrl == LED_CTRL_MODE_PHY_2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012) } else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013) if (speed != SPEED_10)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015) } else if (speed == SPEED_10) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020)
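/* Chips on which the PHY must not be powered down in the current
 * configuration because of hardware bugs.
 */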
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021) static bool tg3_phy_power_bug(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023) switch (tg3_asic_rev(tp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024) case ASIC_REV_5700:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025) case ASIC_REV_5704:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027) case ASIC_REV_5780:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028) if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031) case ASIC_REV_5717:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032) if (!tp->pci_fn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035) case ASIC_REV_5719:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036) case ASIC_REV_5720:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037) if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038) !tp->pci_fn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045)
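/* Chips on which forcing the PHY LEDs off for low power must be
 * skipped.
 */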
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046) static bool tg3_phy_led_bug(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048) switch (tg3_asic_rev(tp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049) case ASIC_REV_5719:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050) case ASIC_REV_5720:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051) if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052) !tp->pci_fn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059)
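/* Drop the PHY into its lowest usable power state.  Serdes and FET
 * PHYs get their own isolation sequences; copper PHYs end up in BMCR
 * power-down unless the chip has a PHY power bug.
 */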
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060) static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064) if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067) if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068) if (tg3_asic_rev(tp) == ASIC_REV_5704) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069) u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070) u32 serdes_cfg = tr32(MAC_SERDES_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072) sg_dig_ctrl |=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073) SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074) tw32(SG_DIG_CTRL, sg_dig_ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075) tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080) if (tg3_asic_rev(tp) == ASIC_REV_5906) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081) tg3_bmcr_reset(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082) val = tr32(GRC_MISC_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083) tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084) udelay(40);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087) u32 phytest;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088) if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089) u32 phy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091) tg3_writephy(tp, MII_ADVERTISE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092) tg3_writephy(tp, MII_BMCR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093) BMCR_ANENABLE | BMCR_ANRESTART);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095) tg3_writephy(tp, MII_TG3_FET_TEST,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096) phytest | MII_TG3_FET_SHADOW_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097) if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098) phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099) tg3_writephy(tp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100) MII_TG3_FET_SHDW_AUXMODE4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101) phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103) tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106) } else if (do_low_power) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107) if (!tg3_phy_led_bug(tp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108) tg3_writephy(tp, MII_TG3_EXT_CTRL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109) MII_TG3_EXT_CTRL_FORCE_LED_OFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111) val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112) MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113) MII_TG3_AUXCTL_PCTL_VREG_11V;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114) tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117) /* The PHY should not be powered down on some chips because
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118) * of bugs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120) if (tg3_phy_power_bug(tp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123) if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124) tg3_chip_rev(tp) == CHIPREV_5761_AX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125) val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126) val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3127) val |= CPMU_LSPD_1000MB_MACCLK_12_5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128) tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131) tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3133)
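/* Grab the NVRAM software-arbitration semaphore.  The lock nests via
 * tp->nvram_lock_cnt; only the first acquisition touches the hardware,
 * polling up to 160 ms for the grant.
 */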
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3134) /* tp->lock is held. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135) static int tg3_nvram_lock(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137) if (tg3_flag(tp, NVRAM)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140) if (tp->nvram_lock_cnt == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141) tw32(NVRAM_SWARB, SWARB_REQ_SET1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142) for (i = 0; i < 8000; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143) if (tr32(NVRAM_SWARB) & SWARB_GNT1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145) udelay(20);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147) if (i == 8000) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148) tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152) tp->nvram_lock_cnt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157) /* tp->lock is held. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158) static void tg3_nvram_unlock(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160) if (tg3_flag(tp, NVRAM)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161) if (tp->nvram_lock_cnt > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162) tp->nvram_lock_cnt--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163) if (tp->nvram_lock_cnt == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164) tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168) /* tp->lock is held. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169) static void tg3_enable_nvram_access(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171) if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172) u32 nvaccess = tr32(NVRAM_ACCESS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174) tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178) /* tp->lock is held. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179) static void tg3_disable_nvram_access(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3180) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181) if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182) u32 nvaccess = tr32(NVRAM_ACCESS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184) tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3187)
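/* Read one 32-bit word through the legacy GRC EEPROM interface, the
 * fallback used when no NVRAM flash is fitted.  The hardware returns
 * the word in the opposite of native endianness, hence the swab32().
 */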
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3188) static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3189) u32 offset, u32 *val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3190) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191) u32 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3192) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3193)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3194) if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3195) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3197) tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3198) EEPROM_ADDR_DEVID_MASK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3199) EEPROM_ADDR_READ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3200) tw32(GRC_EEPROM_ADDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3201) tmp |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3202) (0 << EEPROM_ADDR_DEVID_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3203) ((offset << EEPROM_ADDR_ADDR_SHIFT) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3204) EEPROM_ADDR_ADDR_MASK) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3205) EEPROM_ADDR_READ | EEPROM_ADDR_START);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3207) for (i = 0; i < 1000; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3208) tmp = tr32(GRC_EEPROM_ADDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3210) if (tmp & EEPROM_ADDR_COMPLETE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3211) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3212) msleep(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3213) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3214) if (!(tmp & EEPROM_ADDR_COMPLETE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3215) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3217) tmp = tr32(GRC_EEPROM_DATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3218)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3219) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3220) * The data will always be opposite the native endian
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3221) * format. Perform a blind byteswap to compensate.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3222) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3223) *val = swab32(tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3224)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3225) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3226) }
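/*
 * A worked example of the blind byteswap above: if the EEPROM bytes at
 * 'offset' are 12 34 56 78, then per the comment above a little-endian
 * host sees tr32(GRC_EEPROM_DATA) as 0x78563412, and swab32() recovers
 * 0x12345678 so that tg3_nvram_read_be32() below can re-emit the bytes
 * in on-media order via cpu_to_be32().
 */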
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3228) #define NVRAM_CMD_TIMEOUT 10000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3229)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230) static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3231) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3232) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3233)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3234) tw32(NVRAM_CMD, nvram_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3235) for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3236) usleep_range(10, 40);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3237) if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3238) udelay(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3239) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3240) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3241) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3243) if (i == NVRAM_CMD_TIMEOUT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3244) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3246) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3247) }
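/*
 * The polling loop above bounds the wait at NVRAM_CMD_TIMEOUT (10000)
 * iterations of 10-40 us each, i.e. roughly 100-400 ms plus register
 * access latency, before giving up with -EBUSY.
 */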
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3249) static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3250) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3251) if (tg3_flag(tp, NVRAM) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3252) tg3_flag(tp, NVRAM_BUFFERED) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3253) tg3_flag(tp, FLASH) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3254) !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3255) (tp->nvram_jedecnum == JEDEC_ATMEL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3257) addr = ((addr / tp->nvram_pagesize) <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3258) ATMEL_AT45DB0X1B_PAGE_POS) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3259) (addr % tp->nvram_pagesize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3261) return addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3262) }
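/*
 * A worked example, assuming the 264-byte page size this driver
 * configures for Atmel AT45DB0x1B parts: with ATMEL_AT45DB0X1B_PAGE_POS
 * equal to 9, a linear offset of 1000 falls in page 1000 / 264 = 3 at
 * byte 1000 % 264 = 208, so the translated physical address is
 * (3 << 9) + 208 = 1744.
 */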
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3263)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3264) static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3265) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3266) if (tg3_flag(tp, NVRAM) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3267) tg3_flag(tp, NVRAM_BUFFERED) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3268) tg3_flag(tp, FLASH) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3269) !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3270) (tp->nvram_jedecnum == JEDEC_ATMEL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3271)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3272) addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3273) tp->nvram_pagesize) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3274) (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3275)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3276) return addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3277) }
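/*
 * This is the inverse of tg3_nvram_phys_addr(). Continuing the example
 * above: a physical address of 1744 decodes to page 1744 >> 9 = 3 and
 * byte 1744 & 511 = 208, giving back the linear offset
 * 3 * 264 + 208 = 1000.
 */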
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3278)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3279) /* NOTE: Data read in from NVRAM is byteswapped according to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3280) * the byteswapping settings for all other register accesses.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3281) * tg3 devices are BE devices, so on a BE machine, the data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3282) * returned will be exactly as it is seen in NVRAM. On a LE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3283) * machine, the 32-bit value will be byteswapped.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3284) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3285) static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3286) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3287) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3288)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3289) if (!tg3_flag(tp, NVRAM))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3290) return tg3_nvram_read_using_eeprom(tp, offset, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3291)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3292) offset = tg3_nvram_phys_addr(tp, offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3294) if (offset > NVRAM_ADDR_MSK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3295) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3296)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3297) ret = tg3_nvram_lock(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3298) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3299) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3301) tg3_enable_nvram_access(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3302)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3303) tw32(NVRAM_ADDR, offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3304) ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3305) NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3307) if (ret == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3308) *val = tr32(NVRAM_RDDATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3309)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3310) tg3_disable_nvram_access(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3312) tg3_nvram_unlock(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3313)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3314) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3315) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3317) /* Ensures NVRAM data is in bytestream format. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3318) static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3319) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3320) u32 v;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3321) int res = tg3_nvram_read(tp, offset, &v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3322) if (!res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3323) *val = cpu_to_be32(v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3324) return res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3325) }
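/*
 * A usage sketch (illustrative only, not driver code): reading a
 * 16-byte NVRAM region into a bytestream buffer, as callers such as
 * tg3_get_eeprom() do:
 *
 *	__be32 buf[4];
 *	int i, err;
 *
 *	for (i = 0; i < 4; i++) {
 *		err = tg3_nvram_read_be32(tp, offset + 4 * i, &buf[i]);
 *		if (err)
 *			return err;
 *	}
 *
 * buf[] then holds the NVRAM bytes in on-media order regardless of
 * host endianness.
 */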
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3327) static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3328) u32 offset, u32 len, u8 *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3329) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3330) int i, j, rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3331) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3332)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3333) for (i = 0; i < len; i += 4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3334) u32 addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3335) __be32 data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3336)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3337) addr = offset + i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3338)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3339) memcpy(&data, buf + i, 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3340)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3341) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3342) * The SEEPROM interface expects the data to always be opposite
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3343) * the native endian format. We accomplish this by reversing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3344) * all the operations that would have been performed on the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3345) * data from a call to tg3_nvram_read_be32().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3346) */
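		/*
		 * Concretely, on a little-endian host, source bytes
		 * 12 34 56 78 arrive in 'data' as the __be32 value
		 * 0x12345678; be32_to_cpu() yields 0x12345678 and swab32()
		 * stores 0x78563412, mirroring what the read path observes
		 * at GRC_EEPROM_DATA for those same bytes.
		 */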
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3347) tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3348)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3349) val = tr32(GRC_EEPROM_ADDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3350) tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3351)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3352) val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3353) EEPROM_ADDR_READ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3354) tw32(GRC_EEPROM_ADDR, val |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3355) (0 << EEPROM_ADDR_DEVID_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3356) (addr & EEPROM_ADDR_ADDR_MASK) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3357) EEPROM_ADDR_START |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3358) EEPROM_ADDR_WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3359)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3360) for (j = 0; j < 1000; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3361) val = tr32(GRC_EEPROM_ADDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3362)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3363) if (val & EEPROM_ADDR_COMPLETE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3364) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3365) msleep(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3366) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3367) if (!(val & EEPROM_ADDR_COMPLETE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3368) rc = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3369) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3370) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3371) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3372)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3373) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3374) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3375)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3376) /* offset and length are dword aligned */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3377) static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3378) u8 *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3379) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3380) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3381) u32 pagesize = tp->nvram_pagesize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3382) u32 pagemask = pagesize - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3383) u32 nvram_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3384) u8 *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3385)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3386) tmp = kmalloc(pagesize, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3387) if (tmp == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3388) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3389)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3390) while (len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3391) int j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3392) u32 phy_addr, page_off, size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3394) phy_addr = offset & ~pagemask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3395)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3396) for (j = 0; j < pagesize; j += 4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3397) ret = tg3_nvram_read_be32(tp, phy_addr + j,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3398) (__be32 *) (tmp + j));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3399) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3400) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3401) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3402) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3403) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3404)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3405) page_off = offset & pagemask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3406) size = pagesize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3407) if (len < size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3408) size = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3409)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3410) len -= size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3411)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3412) 		memcpy(tmp + page_off, buf, size);
		buf += size;	/* fix: advance the source pointer so multi-page writes do not repeat the first chunk */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3413)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3414) offset = offset + (pagesize - page_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3415)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3416) tg3_enable_nvram_access(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3418) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3419) * Before we can erase the flash page, we need
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3420) * to issue a special "write enable" command.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3421) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3422) nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3423)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3424) if (tg3_nvram_exec_cmd(tp, nvram_cmd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3425) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3426)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3427) /* Erase the target page */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3428) tw32(NVRAM_ADDR, phy_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3429)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3430) nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3431) NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3432)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3433) if (tg3_nvram_exec_cmd(tp, nvram_cmd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3434) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3435)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3436) /* Issue another write enable to start the write. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3437) nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3438)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3439) if (tg3_nvram_exec_cmd(tp, nvram_cmd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3440) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3441)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3442) for (j = 0; j < pagesize; j += 4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3443) __be32 data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3444)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3445) data = *((__be32 *) (tmp + j));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3446)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3447) tw32(NVRAM_WRDATA, be32_to_cpu(data));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3448)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3449) tw32(NVRAM_ADDR, phy_addr + j);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3450)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3451) nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3452) NVRAM_CMD_WR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3453)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3454) if (j == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3455) nvram_cmd |= NVRAM_CMD_FIRST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3456) else if (j == (pagesize - 4))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3457) nvram_cmd |= NVRAM_CMD_LAST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3458)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3459) ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3460) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3461) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3462) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3463) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3464) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3465) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3466)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3467) nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3468) tg3_nvram_exec_cmd(tp, nvram_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3470) kfree(tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3471)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3472) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3473) }
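/*
 * The function above is a read-modify-write cycle for flash parts that
 * cannot buffer partial pages: read the whole target page into 'tmp',
 * merge in the caller's bytes, issue a write enable, erase the page,
 * issue another write enable, then program the page back one word at a
 * time with NVRAM_CMD_FIRST/NVRAM_CMD_LAST framing.
 */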
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3474)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3475) /* offset and length are dword aligned */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3476) static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3477) u8 *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3478) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3479) int i, ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3480)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3481) for (i = 0; i < len; i += 4, offset += 4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3482) u32 page_off, phy_addr, nvram_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3483) __be32 data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3484)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3485) memcpy(&data, buf + i, 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3486) tw32(NVRAM_WRDATA, be32_to_cpu(data));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3487)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3488) page_off = offset % tp->nvram_pagesize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3489)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3490) phy_addr = tg3_nvram_phys_addr(tp, offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3491)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3492) nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3493)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3494) if (page_off == 0 || i == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3495) nvram_cmd |= NVRAM_CMD_FIRST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3496) if (page_off == (tp->nvram_pagesize - 4))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3497) nvram_cmd |= NVRAM_CMD_LAST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3499) if (i == (len - 4))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3500) nvram_cmd |= NVRAM_CMD_LAST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3502) if ((nvram_cmd & NVRAM_CMD_FIRST) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3503) !tg3_flag(tp, FLASH) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3504) !tg3_flag(tp, 57765_PLUS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3505) tw32(NVRAM_ADDR, phy_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3506)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3507) if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3508) !tg3_flag(tp, 5755_PLUS) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3509) (tp->nvram_jedecnum == JEDEC_ST) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3510) (nvram_cmd & NVRAM_CMD_FIRST)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3511) u32 cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3512)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3513) cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3514) ret = tg3_nvram_exec_cmd(tp, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3515) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3516) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3517) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3518) if (!tg3_flag(tp, FLASH)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3519) /* We always do complete word writes to eeprom. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3520) nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3521) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3522)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3523) ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3524) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3525) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3526) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3527) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3528) }
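/*
 * An example of the FIRST/LAST framing above, assuming a hypothetical
 * 256-byte page part: writing 512 bytes at offset 0 sets NVRAM_CMD_FIRST
 * at i == 0, NVRAM_CMD_LAST at page_off == 252 (the last word of the
 * page), NVRAM_CMD_FIRST again at i == 256 where page_off wraps to 0,
 * and NVRAM_CMD_LAST at i == 508 where i == len - 4.
 */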
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3529)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3530) /* offset and length are dword aligned */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3531) static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3532) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3533) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3535) if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3536) tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3537) ~GRC_LCLCTRL_GPIO_OUTPUT1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3538) udelay(40);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3539) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3540)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3541) if (!tg3_flag(tp, NVRAM)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3542) ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3543) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3544) u32 grc_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3545)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3546) ret = tg3_nvram_lock(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3547) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3548) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3549)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3550) tg3_enable_nvram_access(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3551) if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3552) tw32(NVRAM_WRITE1, 0x406);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3553)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3554) grc_mode = tr32(GRC_MODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3555) tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3556)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3557) if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3558) ret = tg3_nvram_write_block_buffered(tp, offset, len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3559) buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3560) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3561) ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3562) buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3563) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3564)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3565) grc_mode = tr32(GRC_MODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3566) tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3567)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3568) tg3_disable_nvram_access(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3569) tg3_nvram_unlock(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3570) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3571)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3572) if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3573) tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3574) udelay(40);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3575) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3576)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3577) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3578) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3579)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3580) #define RX_CPU_SCRATCH_BASE 0x30000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3581) #define RX_CPU_SCRATCH_SIZE 0x04000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3582) #define TX_CPU_SCRATCH_BASE 0x34000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3583) #define TX_CPU_SCRATCH_SIZE 0x04000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3584)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3585) /* tp->lock is held. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3586) static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3587) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3588) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3589) const int iters = 10000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3590)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3591) for (i = 0; i < iters; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3592) tw32(cpu_base + CPU_STATE, 0xffffffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3593) tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3594) if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3595) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3596) if (pci_channel_offline(tp->pdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3597) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3598) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3599)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3600) return (i == iters) ? -EBUSY : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3601) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3602)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3603) /* tp->lock is held. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3604) static int tg3_rxcpu_pause(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3605) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3606) int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3607)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3608) tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3609) tw32_f(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3610) udelay(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3611)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3612) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3613) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3614)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3615) /* tp->lock is held. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3616) static int tg3_txcpu_pause(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3617) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3618) return tg3_pause_cpu(tp, TX_CPU_BASE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3619) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3620)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3621) /* tp->lock is held. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3622) static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3623) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3624) tw32(cpu_base + CPU_STATE, 0xffffffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3625) tw32_f(cpu_base + CPU_MODE, 0x00000000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3626) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3627)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3628) /* tp->lock is held. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3629) static void tg3_rxcpu_resume(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3630) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3631) tg3_resume_cpu(tp, RX_CPU_BASE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3632) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3633)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3634) /* tp->lock is held. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3635) static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3636) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3637) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3638)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3639) BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3640)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3641) if (tg3_asic_rev(tp) == ASIC_REV_5906) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3642) u32 val = tr32(GRC_VCPU_EXT_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3643)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3644) tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3645) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3646) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3647) if (cpu_base == RX_CPU_BASE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3648) rc = tg3_rxcpu_pause(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3649) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3650) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3651) * There is only an Rx CPU for the 5750 derivative in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3652) * BCM4785.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3653) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3654) if (tg3_flag(tp, IS_SSB_CORE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3655) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3656)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3657) rc = tg3_txcpu_pause(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3658) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3659)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3660) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3661) netdev_err(tp->dev, "%s timed out, %s CPU\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3662) __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3663) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3664) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3665)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3666) /* Clear firmware's nvram arbitration. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3667) if (tg3_flag(tp, NVRAM))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3668) tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3669) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3670) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3671)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3672) static int tg3_fw_data_len(struct tg3 *tp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3673) const struct tg3_firmware_hdr *fw_hdr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3674) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3675) int fw_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3676)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3677) 	/* Non-fragmented firmware has one firmware header followed by a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3678) 	 * contiguous chunk of data to be written. The length field in that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3679) 	 * header is not the length of the data to be written but the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3680) 	 * complete length of the bss. The data length is instead derived
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3681) 	 * from tp->fw->size minus the header length.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3682) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3683) 	 * Fragmented firmware has a main header followed by multiple
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3684) 	 * fragments. Each fragment is identical to non-fragmented firmware,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3685) 	 * with a firmware header followed by a contiguous chunk of data. In
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3686) 	 * the main header, the length field is unused and set to 0xffffffff.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3687) 	 * In each fragment header the length is the entire size of that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3688) 	 * fragment, i.e. fragment data + header length. The data length is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3689) 	 * therefore the length field in the header minus TG3_FW_HDR_LEN.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3690) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3691) if (tp->fw_len == 0xffffffff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3692) fw_len = be32_to_cpu(fw_hdr->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3693) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3694) fw_len = tp->fw->size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3695)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3696) return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3697) }
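/*
 * For example, assuming TG3_FW_HDR_LEN covers the 12-byte
 * {version, base_addr, len} header: a fragment whose len field reads
 * 268 carries (268 - 12) / 4 = 64 data words.
 */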
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3698)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3699) /* tp->lock is held. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3700) static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3701) u32 cpu_scratch_base, int cpu_scratch_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3702) const struct tg3_firmware_hdr *fw_hdr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3703) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3704) int err, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3705) void (*write_op)(struct tg3 *, u32, u32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3706) int total_len = tp->fw->size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3707)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3708) if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3709) netdev_err(tp->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3710) 			   "%s: trying to load TX cpu firmware on a 5705-class device\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3711) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3712) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3713) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3714)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3715) if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3716) write_op = tg3_write_mem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3717) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3718) write_op = tg3_write_indirect_reg32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3719)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3720) if (tg3_asic_rev(tp) != ASIC_REV_57766) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3721) /* It is possible that bootcode is still loading at this point.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3722) 		 * Acquire the nvram lock before halting the cpu.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3723) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3724) int lock_err = tg3_nvram_lock(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3725) err = tg3_halt_cpu(tp, cpu_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3726) if (!lock_err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3727) tg3_nvram_unlock(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3728) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3729) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3730)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3731) for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3732) write_op(tp, cpu_scratch_base + i, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3733) tw32(cpu_base + CPU_STATE, 0xffffffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3734) tw32(cpu_base + CPU_MODE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3735) tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3736) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3737) 		/* Subtract the additional main header for fragmented firmware
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3738) 		 * and advance to the first fragment.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3739) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3740) total_len -= TG3_FW_HDR_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3741) fw_hdr++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3742) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3743)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3744) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3745) u32 *fw_data = (u32 *)(fw_hdr + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3746) for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3747) write_op(tp, cpu_scratch_base +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3748) (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3749) (i * sizeof(u32)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3750) be32_to_cpu(fw_data[i]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3751)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3752) total_len -= be32_to_cpu(fw_hdr->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3753)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3754) /* Advance to next fragment */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3755) fw_hdr = (struct tg3_firmware_hdr *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3756) ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3757) } while (total_len > 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3758)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3759) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3760)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3761) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3762) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3763) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3764)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3765) /* tp->lock is held. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3766) static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3767) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3768) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3769) const int iters = 5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3770)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3771) tw32(cpu_base + CPU_STATE, 0xffffffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3772) tw32_f(cpu_base + CPU_PC, pc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3773)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3774) for (i = 0; i < iters; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3775) if (tr32(cpu_base + CPU_PC) == pc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3776) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3777) tw32(cpu_base + CPU_STATE, 0xffffffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3778) tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3779) tw32_f(cpu_base + CPU_PC, pc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3780) udelay(1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3781) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3782)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3783) return (i == iters) ? -EBUSY : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3784) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3786) /* tp->lock is held. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3787) static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3788) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3789) const struct tg3_firmware_hdr *fw_hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3790) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3791)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3792) fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3794) 	/* The firmware blob starts with version numbers, followed by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3795) 	   the start address and length. The length field holds the complete
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3796) 	   length: length = end_address_of_bss - start_address_of_text.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3797) 	   The remainder is the blob to be loaded contiguously
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3798) 	   from the start address. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3800) err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3801) RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3802) fw_hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3803) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3804) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3805)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3806) err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3807) TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3808) fw_hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3809) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3810) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3812) 	/* Now start up only the RX cpu. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3813) err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3814) be32_to_cpu(fw_hdr->base_addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3815) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3816) 		netdev_err(tp->dev, "%s: failed to set RX CPU PC, is %08x, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3817) 			   "should be %08x\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3818) tr32(RX_CPU_BASE + CPU_PC),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3819) be32_to_cpu(fw_hdr->base_addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3820) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3821) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3823) tg3_rxcpu_resume(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3824)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3825) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3826) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3828) static int tg3_validate_rxcpu_state(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3829) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3830) const int iters = 1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3831) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3832) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3834) 	/* Wait for the boot code to complete initialization and enter the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3835) 	 * service loop. It is then safe to download service patches.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3836) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3837) for (i = 0; i < iters; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3838) if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3839) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3841) udelay(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3842) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3844) if (i == iters) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3845) netdev_err(tp->dev, "Boot code not ready for service patches\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3846) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3847) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3848)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3849) val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3850) if (val & 0xff) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3851) netdev_warn(tp->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3852) "Other patches exist. Not downloading EEE patch\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3853) return -EEXIST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3854) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3856) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3857) }
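/*
 * The poll above waits at most 1000 * 10 us = 10 ms for the boot code
 * to signal TG3_SBROM_IN_SERVICE_LOOP before bailing out.
 */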
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3858)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3859) /* tp->lock is held. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3860) static void tg3_load_57766_firmware(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3861) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3862) struct tg3_firmware_hdr *fw_hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3864) if (!tg3_flag(tp, NO_NVRAM))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3865) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3867) if (tg3_validate_rxcpu_state(tp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3868) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3869)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3870) if (!tp->fw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3871) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3873) 	/* This firmware blob has a different format from older firmware
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3874) 	 * releases, as described below. The main difference is that the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3875) 	 * data is fragmented and written to non-contiguous locations.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3876) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3877) 	 * The blob begins with a firmware header identical to other
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3878) 	 * firmware, consisting of version, base addr and length. The length
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3879) 	 * here is unused and set to 0xffffffff.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3880) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3881) 	 * This is followed by a series of firmware fragments, each
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3882) 	 * individually identical to older firmware, i.e. a firmware header
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3883) 	 * followed by the data for that fragment. The version field of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3884) 	 * each fragment header is unused.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3885) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3887) fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3888) if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3889) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3891) if (tg3_rxcpu_pause(tp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3892) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3894) /* tg3_load_firmware_cpu() will always succeed for the 57766 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3895) tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3897) tg3_rxcpu_resume(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3898) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3899)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3900) /* tp->lock is held. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3901) static int tg3_load_tso_firmware(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3902) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3903) const struct tg3_firmware_hdr *fw_hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3904) unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3905) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3907) if (!tg3_flag(tp, FW_TSO))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3908) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3909)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3910) fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3912) 	/* The firmware blob starts with version numbers, followed by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3913) 	   the start address and length. The length field holds the complete
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3914) 	   length: length = end_address_of_bss - start_address_of_text.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3915) 	   The remainder is the blob to be loaded contiguously
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3916) 	   from the start address. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3918) cpu_scratch_size = tp->fw_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3920) if (tg3_asic_rev(tp) == ASIC_REV_5705) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3921) cpu_base = RX_CPU_BASE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3922) cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3923) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3924) cpu_base = TX_CPU_BASE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3925) cpu_scratch_base = TX_CPU_SCRATCH_BASE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3926) cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3927) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3929) err = tg3_load_firmware_cpu(tp, cpu_base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3930) cpu_scratch_base, cpu_scratch_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3931) fw_hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3932) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3933) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3935) 	/* Now start up the cpu. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3936) err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3937) be32_to_cpu(fw_hdr->base_addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3938) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3939) netdev_err(tp->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3940) 			   "%s: failed to set CPU PC, is %08x, should be %08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3941) __func__, tr32(cpu_base + CPU_PC),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3942) be32_to_cpu(fw_hdr->base_addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3943) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3944) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3945)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3946) tg3_resume_cpu(tp, cpu_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3947) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3948) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3950) /* tp->lock is held. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3951) static void __tg3_set_one_mac_addr(struct tg3 *tp, u8 *mac_addr, int index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3952) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3953) u32 addr_high, addr_low;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3955) addr_high = ((mac_addr[0] << 8) | mac_addr[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3956) addr_low = ((mac_addr[2] << 24) | (mac_addr[3] << 16) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3957) (mac_addr[4] << 8) | mac_addr[5]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3959) if (index < 4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3960) tw32(MAC_ADDR_0_HIGH + (index * 8), addr_high);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3961) tw32(MAC_ADDR_0_LOW + (index * 8), addr_low);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3962) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3963) index -= 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3964) tw32(MAC_EXTADDR_0_HIGH + (index * 8), addr_high);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3965) tw32(MAC_EXTADDR_0_LOW + (index * 8), addr_low);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3966) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3967) }
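/*
 * A worked example of the split above: for MAC address 00:10:18:aa:bb:cc,
 * addr_high = 0x0010 (first two octets) and addr_low = 0x18aabbcc (last
 * four octets), matching the MAC_ADDR_*_HIGH/LOW register layout.
 */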
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3969) /* tp->lock is held. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3970) static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3971) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3972) u32 addr_high;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3973) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3974)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3975) for (i = 0; i < 4; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3976) if (i == 1 && skip_mac_1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3977) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3978) __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3979) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3980)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3981) if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3982) tg3_asic_rev(tp) == ASIC_REV_5704) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3983) for (i = 4; i < 16; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3984) __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3985) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3987) addr_high = (tp->dev->dev_addr[0] +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3988) tp->dev->dev_addr[1] +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3989) tp->dev->dev_addr[2] +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3990) tp->dev->dev_addr[3] +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3991) tp->dev->dev_addr[4] +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3992) tp->dev->dev_addr[5]) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3993) TX_BACKOFF_SEED_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3994) tw32(MAC_TX_BACKOFF_SEED, addr_high);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3995) }
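/*
 * The backoff seed above is just the byte-wise sum of the MAC address,
 * masked by TX_BACKOFF_SEED_MASK. For 00:10:18:aa:bb:cc the sum is
 * 0x00 + 0x10 + 0x18 + 0xaa + 0xbb + 0xcc = 0x259, which (assuming the
 * usual 10-bit mask) is stored unchanged.
 */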
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3997) static void tg3_enable_register_access(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3998) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3999) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4000) * Make sure register accesses (indirect or otherwise) will function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4001) * correctly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4002) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4003) pci_write_config_dword(tp->pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4004) TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4005) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4006)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4007) static int tg3_power_up(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4008) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4009) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4010)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4011) tg3_enable_register_access(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4012)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4013) err = pci_set_power_state(tp->pdev, PCI_D0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4014) if (!err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4015) /* Switch out of Vaux if it is a NIC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4016) tg3_pwrsrc_switch_to_vmain(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4017) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4018) netdev_err(tp->dev, "Transition to D0 failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4019) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4021) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4022) }
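
/* A minimal sketch (illustrative only, not the actual call site) of how
 * a resume path would use tg3_power_up():
 *
 *	int err = tg3_power_up(tp);
 *
 *	if (err)
 *		return err;
 *	netif_device_attach(tp->dev);
 */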
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4023)
static int tg3_setup_phy(struct tg3 *tp, bool force_reset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4025)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4026) static int tg3_power_down_prepare(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4027) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4028) u32 misc_host_ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4029) bool device_should_wake, do_low_power;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4031) tg3_enable_register_access(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4032)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4033) /* Restore the CLKREQ setting. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4034) if (tg3_flag(tp, CLKREQ_BUG))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4035) pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4036) PCI_EXP_LNKCTL_CLKREQ_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4037)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4038) misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4039) tw32(TG3PCI_MISC_HOST_CTRL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4040) misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4041)
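	/* Wake-on-LAN is armed only when the platform permits wakeup on
	 * this device and the user has enabled WOL (e.g. via ethtool).
	 */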
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4042) device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4043) tg3_flag(tp, WOL_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4045) if (tg3_flag(tp, USE_PHYLIB)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4046) do_low_power = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4047) if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4048) !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4049) __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising) = { 0, };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4050) struct phy_device *phydev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4051) u32 phyid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4052)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4053) phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4054)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4055) tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4056)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4057) tp->link_config.speed = phydev->speed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4058) tp->link_config.duplex = phydev->duplex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4059) tp->link_config.autoneg = phydev->autoneg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4060) ethtool_convert_link_mode_to_legacy_u32(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4061) &tp->link_config.advertising,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4062) phydev->advertising);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4063)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4064) linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, advertising);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4065) linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4066) advertising);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4067) linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4068) advertising);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4069) linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4070) advertising);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4071)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4072) if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4073) if (tg3_flag(tp, WOL_SPEED_100MB)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4074) linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4075) advertising);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4076) linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4077) advertising);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4078) linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4079) advertising);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4080) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4081) linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4082) advertising);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4083) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4084) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4086) linkmode_copy(phydev->advertising, advertising);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4087) phy_start_aneg(phydev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4088)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4089) phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4090) if (phyid != PHY_ID_BCMAC131) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4091) phyid &= PHY_BCM_OUI_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4092) if (phyid == PHY_BCM_OUI_1 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4093) phyid == PHY_BCM_OUI_2 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4094) phyid == PHY_BCM_OUI_3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4095) do_low_power = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4096) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4097) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4098) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4099) do_low_power = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4101) if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4102) tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4104) if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4105) tg3_setup_phy(tp, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4106) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4108) if (tg3_asic_rev(tp) == ASIC_REV_5906) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4109) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4111) val = tr32(GRC_VCPU_EXT_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4112) tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4113) } else if (!tg3_flag(tp, ENABLE_ASF)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4114) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4115) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4116)
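		/* No ASF agent is running: give the bootcode up to
		 * ~200 ms to post its completion magic in the firmware
		 * status mailbox.
		 */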
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4117) for (i = 0; i < 200; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4118) tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4119) if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4120) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4121) msleep(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4122) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4123) }
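	/* Post the shutdown/WOL state to the firmware mailbox so that
	 * magic-packet wakeup stays armed across the power transition.
	 */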
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4124) if (tg3_flag(tp, WOL_CAP))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4125) tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4126) WOL_DRV_STATE_SHUTDOWN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4127) WOL_DRV_WOL |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4128) WOL_SET_MAGIC_PKT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4129)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4130) if (device_should_wake) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4131) u32 mac_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4133) if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4134) if (do_low_power &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4135) !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4136) tg3_phy_auxctl_write(tp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4137) MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4138) MII_TG3_AUXCTL_PCTL_WOL_EN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4139) MII_TG3_AUXCTL_PCTL_100TX_LPWR |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4140) MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4141) udelay(40);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4142) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4143)
			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
				mac_mode = MAC_MODE_PORT_MODE_GMII;
			} else if (tp->phy_flags &
				   TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
				if (tp->link_config.active_speed == SPEED_1000)
					mac_mode = MAC_MODE_PORT_MODE_GMII;
				else
					mac_mode = MAC_MODE_PORT_MODE_MII;
			} else {
				mac_mode = MAC_MODE_PORT_MODE_MII;
			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4154)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4155) mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4156) if (tg3_asic_rev(tp) == ASIC_REV_5700) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4157) u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4158) SPEED_100 : SPEED_10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4159) if (tg3_5700_link_polarity(tp, speed))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4160) mac_mode |= MAC_MODE_LINK_POLARITY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4161) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4162) mac_mode &= ~MAC_MODE_LINK_POLARITY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4163) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4164) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4165) mac_mode = MAC_MODE_PORT_MODE_TBI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4166) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4168) if (!tg3_flag(tp, 5750_PLUS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4169) tw32(MAC_LED_CTRL, tp->led_ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4170)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4171) mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4172) if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4173) (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4174) mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4175)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4176) if (tg3_flag(tp, ENABLE_APE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4177) mac_mode |= MAC_MODE_APE_TX_EN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4178) MAC_MODE_APE_RX_EN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4179) MAC_MODE_TDE_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4180)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4181) tw32_f(MAC_MODE, mac_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4182) udelay(100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4184) tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4185) udelay(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4186) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4187)
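	/* Save power by gating the RX/TX (and on some chips, core)
	 * clocks where the hardware allows it.  5780-class, CPMU-
	 * equipped and 5906 devices manage their own clocks and are
	 * left alone.
	 */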
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4188) if (!tg3_flag(tp, WOL_SPEED_100MB) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4189) (tg3_asic_rev(tp) == ASIC_REV_5700 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4190) tg3_asic_rev(tp) == ASIC_REV_5701)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4191) u32 base_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4193) base_val = tp->pci_clock_ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4194) base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4195) CLOCK_CTRL_TXCLK_DISABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4197) tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4198) CLOCK_CTRL_PWRDOWN_PLL133, 40);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4199) } else if (tg3_flag(tp, 5780_CLASS) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4200) tg3_flag(tp, CPMU_PRESENT) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4201) tg3_asic_rev(tp) == ASIC_REV_5906) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4202) /* do nothing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4203) } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4204) u32 newbits1, newbits2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4205)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4206) if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4207) tg3_asic_rev(tp) == ASIC_REV_5701) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4208) newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4209) CLOCK_CTRL_TXCLK_DISABLE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4210) CLOCK_CTRL_ALTCLK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4211) newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4212) } else if (tg3_flag(tp, 5705_PLUS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4213) newbits1 = CLOCK_CTRL_625_CORE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4214) newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4215) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4216) newbits1 = CLOCK_CTRL_ALTCLK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4217) newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4218) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4220) tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4221) 40);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4222)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4223) tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4224) 40);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4225)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4226) if (!tg3_flag(tp, 5705_PLUS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4227) u32 newbits3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4228)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4229) if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4230) tg3_asic_rev(tp) == ASIC_REV_5701) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4231) newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4232) CLOCK_CTRL_TXCLK_DISABLE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4233) CLOCK_CTRL_44MHZ_CORE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4234) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4235) newbits3 = CLOCK_CTRL_44MHZ_CORE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4236) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4237)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4238) tw32_wait_f(TG3PCI_CLOCK_CTRL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4239) tp->pci_clock_ctrl | newbits3, 40);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4240) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4241) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4242)
	if (!device_should_wake && !tg3_flag(tp, ENABLE_ASF))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4244) tg3_power_down_phy(tp, do_low_power);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4246) tg3_frob_aux_power(tp, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4247)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4248) /* Workaround for unstable PLL clock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4249) if ((!tg3_flag(tp, IS_SSB_CORE)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4250) ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4251) (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
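		/* Register 0x7d00 has no symbolic name in this driver;
		 * the workaround clears bits 16, 4, 2, 1 and 0 and,
		 * when no ASF firmware is running, also halts the RX
		 * CPU.
		 */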
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4252) u32 val = tr32(0x7d00);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4254) val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4255) tw32(0x7d00, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4256) if (!tg3_flag(tp, ENABLE_ASF)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4257) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4259) err = tg3_nvram_lock(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4260) tg3_halt_cpu(tp, RX_CPU_BASE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4261) if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4262) tg3_nvram_unlock(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4263) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4264) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4265)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4266) tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4267)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4268) tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4269)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4270) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4271) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4272)
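/* Final power-down: arm PME according to the WOL_ENABLE flag, then put
 * the device into D3hot.  By this point tg3_power_down_prepare() has
 * presumably already quiesced the hardware.
 */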
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4273) static void tg3_power_down(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4274) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4275) pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4276) pci_set_power_state(tp->pdev, PCI_D3hot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4277) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4278)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4279) static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u32 *speed, u8 *duplex)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4280) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4281) switch (val & MII_TG3_AUX_STAT_SPDMASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4282) case MII_TG3_AUX_STAT_10HALF:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4283) *speed = SPEED_10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4284) *duplex = DUPLEX_HALF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4285) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4287) case MII_TG3_AUX_STAT_10FULL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4288) *speed = SPEED_10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4289) *duplex = DUPLEX_FULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4290) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4291)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4292) case MII_TG3_AUX_STAT_100HALF:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4293) *speed = SPEED_100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4294) *duplex = DUPLEX_HALF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4295) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4296)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4297) case MII_TG3_AUX_STAT_100FULL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4298) *speed = SPEED_100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4299) *duplex = DUPLEX_FULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4300) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4302) case MII_TG3_AUX_STAT_1000HALF:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4303) *speed = SPEED_1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4304) *duplex = DUPLEX_HALF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4305) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4307) case MII_TG3_AUX_STAT_1000FULL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4308) *speed = SPEED_1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4309) *duplex = DUPLEX_FULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4310) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4312) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4313) if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4314) *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4315) SPEED_10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4316) *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4317) DUPLEX_HALF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4318) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4319) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4320) *speed = SPEED_UNKNOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4321) *duplex = DUPLEX_UNKNOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4322) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4323) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4324) }
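
/* Illustrative decode (a sketch; the real caller is the copper PHY
 * setup code later in this file):
 *
 *	u32 aux_stat, speed;
 *	u8 duplex;
 *
 *	tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
 *	tg3_aux_stat_to_speed_duplex(tp, aux_stat, &speed, &duplex);
 */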
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4325)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4326) static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4327) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4328) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4329) u32 val, new_adv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4330)
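	/* Build the 10/100 advertisement word: the CSMA selector, the
	 * requested 10/100 modes, and pause bits derived from the
	 * requested flow control.
	 */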
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4331) new_adv = ADVERTISE_CSMA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4332) new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4333) new_adv |= mii_advertise_flowctrl(flowctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4335) err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4336) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4337) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4338)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4339) if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4340) new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4341)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4342) if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4343) tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4344) new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4345)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4346) err = tg3_writephy(tp, MII_CTRL1000, new_adv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4347) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4348) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4349) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4350)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4351) if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4352) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4353)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4354) tw32(TG3_CPMU_EEE_MODE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4355) tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4356)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4357) err = tg3_phy_toggle_auxctl_smdsp(tp, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4358) if (!err) {
		int err2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4360)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4361) val = 0;
		/* Advertise 100BASE-TX EEE ability */
		if (advertise & ADVERTISED_100baseT_Full)
			val |= MDIO_AN_EEE_ADV_100TX;
		/* Advertise 1000BASE-T EEE ability */
		if (advertise & ADVERTISED_1000baseT_Full)
			val |= MDIO_AN_EEE_ADV_1000T;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4368)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4369) if (!tp->eee.eee_enabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4370) val = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4371) tp->eee.advertised = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4372) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4373) tp->eee.advertised = advertise &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4374) (ADVERTISED_100baseT_Full |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4375) ADVERTISED_1000baseT_Full);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4376) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4377)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4378) err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4379) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4380) val = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4381)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4382) switch (tg3_asic_rev(tp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4383) case ASIC_REV_5717:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4384) case ASIC_REV_57765:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4385) case ASIC_REV_57766:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4386) case ASIC_REV_5719:
			/* If any EEE link modes were advertised above... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4388) if (val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4389) val = MII_TG3_DSP_TAP26_ALNOKO |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4390) MII_TG3_DSP_TAP26_RMRXSTO |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4391) MII_TG3_DSP_TAP26_OPCSINPT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4392) tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4393) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4394) case ASIC_REV_5720:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4395) case ASIC_REV_5762:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4396) if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4397) tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4398) MII_TG3_DSP_CH34TP2_HIBW01);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4399) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4401) err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4402) if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4403) err = err2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4404) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4405)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4406) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4407) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4408) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4409)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4410) static void tg3_phy_copper_begin(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4411) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4412) if (tp->link_config.autoneg == AUTONEG_ENABLE ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4413) (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4414) u32 adv, fc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4415)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4416) if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4417) !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4418) adv = ADVERTISED_10baseT_Half |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4419) ADVERTISED_10baseT_Full;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4420) if (tg3_flag(tp, WOL_SPEED_100MB))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4421) adv |= ADVERTISED_100baseT_Half |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4422) ADVERTISED_100baseT_Full;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4423) if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4424) if (!(tp->phy_flags &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4425) TG3_PHYFLG_DISABLE_1G_HD_ADV))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4426) adv |= ADVERTISED_1000baseT_Half;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4427) adv |= ADVERTISED_1000baseT_Full;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4428) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4429)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4430) fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4431) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4432) adv = tp->link_config.advertising;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4433) if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4434) adv &= ~(ADVERTISED_1000baseT_Half |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4435) ADVERTISED_1000baseT_Full);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4436)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4437) fc = tp->link_config.flowctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4438) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4439)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4440) tg3_phy_autoneg_cfg(tp, adv, fc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4441)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4442) if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4443) (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4444) /* Normally during power down we want to autonegotiate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4445) * the lowest possible speed for WOL. However, to avoid
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4446) * link flap, we leave it untouched.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4447) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4448) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4449) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4450)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4451) tg3_writephy(tp, MII_BMCR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4452) BMCR_ANENABLE | BMCR_ANRESTART);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4453) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4454) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4455) u32 bmcr, orig_bmcr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4456)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4457) tp->link_config.active_speed = tp->link_config.speed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4458) tp->link_config.active_duplex = tp->link_config.duplex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4459)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4460) if (tg3_asic_rev(tp) == ASIC_REV_5714) {
			/* With autoneg disabled, the 5714/5715 family
			 * only links up when the advertisement register
			 * has the configured speed enabled.
			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4465) tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4466) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4467)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4468) bmcr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4469) switch (tp->link_config.speed) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4470) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4471) case SPEED_10:
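			/* Leaving the BMCR speed bits clear selects 10 Mb/s. */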
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4472) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4473)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4474) case SPEED_100:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4475) bmcr |= BMCR_SPEED100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4476) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4477)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4478) case SPEED_1000:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4479) bmcr |= BMCR_SPEED1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4480) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4481) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4482)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4483) if (tp->link_config.duplex == DUPLEX_FULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4484) bmcr |= BMCR_FULLDPLX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4485)
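		/* To force new link parameters, first drop the link by
		 * putting the PHY into loopback, wait (up to ~15 ms)
		 * for the link to go down, then write the target BMCR
		 * so the link comes back with the new settings.
		 */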
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4486) if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4487) (bmcr != orig_bmcr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4488) tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4489) for (i = 0; i < 1500; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4490) u32 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4491)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4492) udelay(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4493) if (tg3_readphy(tp, MII_BMSR, &tmp) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4494) tg3_readphy(tp, MII_BMSR, &tmp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4495) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4496) if (!(tmp & BMSR_LSTATUS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4497) udelay(40);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4498) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4499) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4500) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4501) tg3_writephy(tp, MII_BMCR, bmcr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4502) udelay(40);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4503) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4504) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4505) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4506)
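/* Recover tp->link_config from the PHY's current register state,
 * presumably so that a configuration left behind by boot firmware is
 * preserved rather than overwritten.
 */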
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4507) static int tg3_phy_pull_config(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4508) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4509) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4510) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4511)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4512) err = tg3_readphy(tp, MII_BMCR, &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4513) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4514) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4515)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4516) if (!(val & BMCR_ANENABLE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4517) tp->link_config.autoneg = AUTONEG_DISABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4518) tp->link_config.advertising = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4519) tg3_flag_clear(tp, PAUSE_AUTONEG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4520)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4521) err = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4522)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4523) switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4524) case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4525) if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4526) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4527)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4528) tp->link_config.speed = SPEED_10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4529) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4530) case BMCR_SPEED100:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4531) if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4532) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4533)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4534) tp->link_config.speed = SPEED_100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4535) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4536) case BMCR_SPEED1000:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4537) if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4538) tp->link_config.speed = SPEED_1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4539) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4540) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4541) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4542) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4543) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4544) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4545)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4546) if (val & BMCR_FULLDPLX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4547) tp->link_config.duplex = DUPLEX_FULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4548) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4549) tp->link_config.duplex = DUPLEX_HALF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4550)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4551) tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4552)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4553) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4554) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4555) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4556)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4557) tp->link_config.autoneg = AUTONEG_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4558) tp->link_config.advertising = ADVERTISED_Autoneg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4559) tg3_flag_set(tp, PAUSE_AUTONEG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4560)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4561) if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4562) u32 adv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4563)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4564) err = tg3_readphy(tp, MII_ADVERTISE, &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4565) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4566) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4567)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4568) adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4569) tp->link_config.advertising |= adv | ADVERTISED_TP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4570)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4571) tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4572) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4573) tp->link_config.advertising |= ADVERTISED_FIBRE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4574) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4575)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4576) if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4577) u32 adv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4578)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4579) if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4580) err = tg3_readphy(tp, MII_CTRL1000, &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4581) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4582) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4583)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4584) adv = mii_ctrl1000_to_ethtool_adv_t(val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4585) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4586) err = tg3_readphy(tp, MII_ADVERTISE, &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4587) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4588) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4589)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4590) adv = tg3_decode_flowctrl_1000X(val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4591) tp->link_config.flowctrl = adv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4592)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4593) val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4594) adv = mii_adv_to_ethtool_adv_x(val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4595) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4596)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4597) tp->link_config.advertising |= adv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4598) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4599)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4600) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4601) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4602) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4603)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4604) static int tg3_init_5401phy_dsp(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4605) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4606) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4607)
	/* Turn off tap power management and set the extended packet
	 * length bit.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4610) err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4611)
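	/* The address/value pairs below are opaque, vendor-provided DSP
	 * coefficients for the BCM5401; they are not otherwise
	 * documented here.
	 */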
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4612) err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4613) err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4614) err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4615) err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4616) err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4617)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4618) udelay(40);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4619)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4620) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4621) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4622)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4623) static bool tg3_phy_eee_config_ok(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4624) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4625) struct ethtool_eee eee;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4626)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4627) if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4628) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4629)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4630) tg3_eee_pull_config(tp, &eee);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4631)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4632) if (tp->eee.eee_enabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4633) if (tp->eee.advertised != eee.advertised ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4634) tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4635) tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4636) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4637) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4638) /* EEE is disabled but we're advertising */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4639) if (eee.advertised)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4640) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4641) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4642)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4643) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4644) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4645)
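/* Check that the PHY's advertisement registers still match what the
 * driver intends to advertise; a false return tells the caller to
 * reprogram the PHY and restart autonegotiation.
 */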
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4646) static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4647) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4648) u32 advmsk, tgtadv, advertising;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4649)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4650) advertising = tp->link_config.advertising;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4651) tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4652)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4653) advmsk = ADVERTISE_ALL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4654) if (tp->link_config.active_duplex == DUPLEX_FULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4655) tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4656) advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4657) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4658)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4659) if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4660) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4661)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4662) if ((*lcladv & advmsk) != tgtadv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4663) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4664)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4665) if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4666) u32 tg3_ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4667)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4668) tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4669)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4670) if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4671) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4672)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4673) if (tgtadv &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4674) (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4675) tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4676) tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4677) tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4678) CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4679) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4680) tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4681) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4682)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4683) if (tg3_ctrl != tgtadv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4684) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4685) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4686)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4687) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4688) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4689)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4690) static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4691) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4692) u32 lpeth = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4693)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4694) if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4695) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4696)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4697) if (tg3_readphy(tp, MII_STAT1000, &val))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4698) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4699)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4700) lpeth = mii_stat1000_to_ethtool_lpa_t(val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4701) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4702)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4703) if (tg3_readphy(tp, MII_LPA, rmtadv))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4704) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4705)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4706) lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4707) tp->link_config.rmt_adv = lpeth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4708)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4709) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4710) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4711)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4712) static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4713) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4714) if (curr_link_up != tp->link_up) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4715) if (curr_link_up) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4716) netif_carrier_on(tp->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4717) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4718) netif_carrier_off(tp->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4719) if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4720) tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4721) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4722)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4723) tg3_link_report(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4724) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4725) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4726)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4727) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4728) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4729)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4730) static void tg3_clear_mac_status(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4731) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4732) tw32(MAC_EVENT, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4733)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4734) tw32_f(MAC_STATUS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4735) MAC_STATUS_SYNC_CHANGED |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4736) MAC_STATUS_CFG_CHANGED |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4737) MAC_STATUS_MI_COMPLETION |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4738) MAC_STATUS_LNKSTATE_CHANGED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4739) udelay(40);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4740) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4741)
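/* Program the CPMU EEE engine: link-idle detection sources, LPI exit
 * timing, the direction(s) in which LPI is requested, and the two
 * debounce timers.
 */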
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4742) static void tg3_setup_eee(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4743) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4744) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4745)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4746) val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4747) TG3_CPMU_EEE_LNKIDL_UART_IDL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4748) if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4749) val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4750)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4751) tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4752)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4753) tw32_f(TG3_CPMU_EEE_CTRL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4754) TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4755)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4756) val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4757) (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4758) TG3_CPMU_EEEMD_LPI_IN_RX |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4759) TG3_CPMU_EEEMD_EEE_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4760)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4761) if (tg3_asic_rev(tp) != ASIC_REV_5717)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4762) val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4763)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4764) if (tg3_flag(tp, ENABLE_APE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4765) val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4766)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4767) tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4768)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4769) tw32_f(TG3_CPMU_EEE_DBTMR1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4770) TG3_CPMU_DBTMR1_PCIEXIT_2047US |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4771) (tp->eee.tx_lpi_timer & 0xffff));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4772)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4773) tw32_f(TG3_CPMU_EEE_DBTMR2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4774) TG3_CPMU_DBTMR2_APE_TX_2047US |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4775) TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4776) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4777)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4778) static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4779) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4780) bool current_link_up;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4781) u32 bmsr, val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4782) u32 lcl_adv, rmt_adv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4783) u32 current_speed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4784) u8 current_duplex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4785) int i, err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4786)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4787) tg3_clear_mac_status(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4788)
	if (tp->mi_mode & MAC_MI_MODE_AUTO_POLL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4790) tw32_f(MAC_MI_MODE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4791) (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4792) udelay(80);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4793) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4794)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4795) tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4797) /* Some third-party PHYs need to be reset on link going
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4798) * down.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4799) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4800) if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4801) tg3_asic_rev(tp) == ASIC_REV_5704 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4802) tg3_asic_rev(tp) == ASIC_REV_5705) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4803) tp->link_up) {
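		/* BMSR_LSTATUS is latched-low, so BMSR is read twice:
		 * the first read returns the latched value, the second
		 * the current link state.
		 */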
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    !(bmsr & BMSR_LSTATUS))
			force_reset = true;
	}
	if (force_reset)
		tg3_phy_reset(tp);

	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
		    !tg3_flag(tp, INIT_COMPLETE))
			bmsr = 0;

		if (!(bmsr & BMSR_LSTATUS)) {
			err = tg3_init_5401phy_dsp(tp);
			if (err)
				return err;

			tg3_readphy(tp, MII_BMSR, &bmsr);
			for (i = 0; i < 1000; i++) {
				udelay(10);
				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
				    (bmsr & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}

			if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
			    TG3_PHY_REV_BCM5401_B0 &&
			    !(bmsr & BMSR_LSTATUS) &&
			    tp->link_config.active_speed == SPEED_1000) {
				err = tg3_phy_reset(tp);
				if (!err)
					err = tg3_init_5401phy_dsp(tp);
				if (err)
					return err;
			}
		}
	} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
		   tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
		/* 5701 {A0,B0} CRC bug workaround */
		tg3_writephy(tp, 0x15, 0x0a75);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
	}

	/* Clear pending interrupts... */
	tg3_readphy(tp, MII_TG3_ISTAT, &val);
	tg3_readphy(tp, MII_TG3_ISTAT, &val);

	if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
	else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
		tg3_writephy(tp, MII_TG3_IMASK, ~0);

	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_asic_rev(tp) == ASIC_REV_5701) {
		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		else
			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
	}

	current_link_up = false;
	current_speed = SPEED_UNKNOWN;
	current_duplex = DUPLEX_UNKNOWN;
	tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
	tp->link_config.rmt_adv = 0;

	if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
		err = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
					  &val);
		if (!err && !(val & (1 << 10))) {
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
					     val | (1 << 10));
			goto relink;
		}
	}

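	/* Poll for link-up for roughly 4 ms (up to 100 attempts, 40 us
	 * apart); BMSR is again read twice per attempt because the link
	 * bit is latched.
	 */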
	bmsr = 0;
	for (i = 0; i < 100; i++) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			break;
		udelay(40);
	}

	if (bmsr & BMSR_LSTATUS) {
		u32 aux_stat, bmcr;

		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
		for (i = 0; i < 2000; i++) {
			udelay(10);
			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
			    aux_stat)
				break;
		}

		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
					     &current_speed,
					     &current_duplex);

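		/* Poll BMCR until it returns a plausible value; 0x0000 and
		 * 0x7fff are treated as garbage reads from a PHY that is
		 * not ready yet.
		 */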
		bmcr = 0;
		for (i = 0; i < 200; i++) {
			tg3_readphy(tp, MII_BMCR, &bmcr);
			if (tg3_readphy(tp, MII_BMCR, &bmcr))
				continue;
			if (bmcr && bmcr != 0x7fff)
				break;
			udelay(10);
		}

		lcl_adv = 0;
		rmt_adv = 0;

		tp->link_config.active_speed = current_speed;
		tp->link_config.active_duplex = current_duplex;

		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			bool eee_config_ok = tg3_phy_eee_config_ok(tp);

			if ((bmcr & BMCR_ANENABLE) &&
			    eee_config_ok &&
			    tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
			    tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
				current_link_up = true;

			/* Changes to the EEE settings take effect only after
			 * a PHY reset. If we skipped the reset because Link
			 * Flap Avoidance is enabled, do it now.
			 */
			if (!eee_config_ok &&
			    (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
			    !force_reset) {
				tg3_setup_eee(tp);
				tg3_phy_reset(tp);
			}
		} else {
			if (!(bmcr & BMCR_ANENABLE) &&
			    tp->link_config.speed == current_speed &&
			    tp->link_config.duplex == current_duplex) {
				current_link_up = true;
			}
		}

		if (current_link_up &&
		    tp->link_config.active_duplex == DUPLEX_FULL) {
			u32 reg, bit;

			if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
				reg = MII_TG3_FET_GEN_STAT;
				bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
			} else {
				reg = MII_TG3_EXT_STAT;
				bit = MII_TG3_EXT_STAT_MDIX;
			}

			if (!tg3_readphy(tp, reg, &val) && (val & bit))
				tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;

			tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
		}
	}

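	/* (Re)program the PHY and restart negotiation whenever the link is
	 * down or the PHY is in a low-power state.
	 */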
relink:
	if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
		tg3_phy_copper_begin(tp);

		if (tg3_flag(tp, ROBOSWITCH)) {
			current_link_up = true;
			/* FIXME: when BCM5325 switch is used use 100 MBit/s */
			current_speed = SPEED_1000;
			current_duplex = DUPLEX_FULL;
			tp->link_config.active_speed = current_speed;
			tp->link_config.active_duplex = current_duplex;
		}

		tg3_readphy(tp, MII_BMSR, &bmsr);
		if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
		    (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
			current_link_up = true;
	}

	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
	if (current_link_up) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
	else
		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;

	/* In order for the 5750 core in BCM4785 chip to work properly
	 * in RGMII mode, the Led Control Register must be set up.
	 */
	if (tg3_flag(tp, RGMII_MODE)) {
		u32 led_ctrl = tr32(MAC_LED_CTRL);
		led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);

		if (tp->link_config.active_speed == SPEED_10)
			led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
		else if (tp->link_config.active_speed == SPEED_100)
			led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
				     LED_CTRL_100MBPS_ON);
		else if (tp->link_config.active_speed == SPEED_1000)
			led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
				     LED_CTRL_1000MBPS_ON);

		tw32(MAC_LED_CTRL, led_ctrl);
		udelay(40);
	}

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	if (tg3_asic_rev(tp) == ASIC_REV_5700) {
		if (current_link_up &&
		    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
		else
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	/* ??? Without this setting Netgear GA302T PHY does not
	 * ??? send/receive packets...
	 */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tg3_phy_eee_adjust(tp, current_link_up);

	if (tg3_flag(tp, USE_LINKCHG_REG)) {
		/* Polled via timer. */
		tw32_f(MAC_EVENT, 0);
	} else {
		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	}
	udelay(40);

	if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
	    current_link_up &&
	    tp->link_config.active_speed == SPEED_1000 &&
	    (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
		udelay(120);
		tw32_f(MAC_STATUS,
		       (MAC_STATUS_SYNC_CHANGED |
			MAC_STATUS_CFG_CHANGED));
		udelay(40);
		tg3_write_mem(tp,
			      NIC_SRAM_FIRMWARE_MBOX,
			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
	}

	/* Prevent send BD corruption. */
	if (tg3_flag(tp, CLKREQ_BUG)) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
						   PCI_EXP_LNKCTL_CLKREQ_EN);
		else
			pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
						 PCI_EXP_LNKCTL_CLKREQ_EN);
	}

	tg3_test_and_report_link_chg(tp, current_link_up);

	return 0;
}

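/* Software implementation of 1000BASE-X auto-negotiation.  The states
 * and MR_* flags below mirror the management variables of the IEEE
 * 802.3 Clause 37 state machine (mr_an_enable, mr_an_complete,
 * mr_page_rx, ...).
 */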
struct tg3_fiber_aneginfo {
	int state;
#define ANEG_STATE_UNKNOWN		0
#define ANEG_STATE_AN_ENABLE		1
#define ANEG_STATE_RESTART_INIT		2
#define ANEG_STATE_RESTART		3
#define ANEG_STATE_DISABLE_LINK_OK	4
#define ANEG_STATE_ABILITY_DETECT_INIT	5
#define ANEG_STATE_ABILITY_DETECT	6
#define ANEG_STATE_ACK_DETECT_INIT	7
#define ANEG_STATE_ACK_DETECT		8
#define ANEG_STATE_COMPLETE_ACK_INIT	9
#define ANEG_STATE_COMPLETE_ACK		10
#define ANEG_STATE_IDLE_DETECT_INIT	11
#define ANEG_STATE_IDLE_DETECT		12
#define ANEG_STATE_LINK_OK		13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
#define ANEG_STATE_NEXT_PAGE_WAIT	15

	u32 flags;
#define MR_AN_ENABLE		0x00000001
#define MR_RESTART_AN		0x00000002
#define MR_AN_COMPLETE		0x00000004
#define MR_PAGE_RX		0x00000008
#define MR_NP_LOADED		0x00000010
#define MR_TOGGLE_TX		0x00000020
#define MR_LP_ADV_FULL_DUPLEX	0x00000040
#define MR_LP_ADV_HALF_DUPLEX	0x00000080
#define MR_LP_ADV_SYM_PAUSE	0x00000100
#define MR_LP_ADV_ASYM_PAUSE	0x00000200
#define MR_LP_ADV_REMOTE_FAULT1	0x00000400
#define MR_LP_ADV_REMOTE_FAULT2	0x00000800
#define MR_LP_ADV_NEXT_PAGE	0x00001000
#define MR_TOGGLE_RX		0x00002000
#define MR_NP_RX		0x00004000

#define MR_LINK_OK		0x80000000

	unsigned long link_time, cur_time;

	u32 ability_match_cfg;
	int ability_match_count;

	char ability_match, idle_match, ack_match;

	u32 txconfig, rxconfig;
#define ANEG_CFG_NP		0x00000080
#define ANEG_CFG_ACK		0x00000040
#define ANEG_CFG_RF2		0x00000020
#define ANEG_CFG_RF1		0x00000010
#define ANEG_CFG_PS2		0x00000001
#define ANEG_CFG_PS1		0x00008000
#define ANEG_CFG_HD		0x00004000
#define ANEG_CFG_FD		0x00002000
#define ANEG_CFG_INVAL		0x00001f06

};
#define ANEG_OK		0
#define ANEG_DONE	1
#define ANEG_TIMER_ENAB	2
#define ANEG_FAILED	-1

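/* cur_time advances once per call to tg3_fiber_aneg_smachine(); with
 * fiber_autoneg() ticking it roughly every microsecond, 10000 ticks
 * approximate the 10 ms link_timer that Clause 37 calls for.
 */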
#define ANEG_STATE_SETTLE_TIME	10000

static int tg3_fiber_aneg_smachine(struct tg3 *tp,
				   struct tg3_fiber_aneginfo *ap)
{
	u16 flowctrl;
	unsigned long delta;
	u32 rx_cfg_reg;
	int ret;

	if (ap->state == ANEG_STATE_UNKNOWN) {
		ap->rxconfig = 0;
		ap->link_time = 0;
		ap->cur_time = 0;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->idle_match = 0;
		ap->ack_match = 0;
	}
	ap->cur_time++;

	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);

		if (rx_cfg_reg != ap->ability_match_cfg) {
			ap->ability_match_cfg = rx_cfg_reg;
			ap->ability_match = 0;
			ap->ability_match_count = 0;
		} else {
			if (++ap->ability_match_count > 1) {
				ap->ability_match = 1;
				ap->ability_match_cfg = rx_cfg_reg;
			}
		}
		if (rx_cfg_reg & ANEG_CFG_ACK)
			ap->ack_match = 1;
		else
			ap->ack_match = 0;

		ap->idle_match = 0;
	} else {
		ap->idle_match = 1;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->ack_match = 0;

		rx_cfg_reg = 0;
	}

	ap->rxconfig = rx_cfg_reg;
	ret = ANEG_OK;

	switch (ap->state) {
	case ANEG_STATE_UNKNOWN:
		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
			ap->state = ANEG_STATE_AN_ENABLE;

		fallthrough;
	case ANEG_STATE_AN_ENABLE:
		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
		if (ap->flags & MR_AN_ENABLE) {
			ap->link_time = 0;
			ap->cur_time = 0;
			ap->ability_match_cfg = 0;
			ap->ability_match_count = 0;
			ap->ability_match = 0;
			ap->idle_match = 0;
			ap->ack_match = 0;

			ap->state = ANEG_STATE_RESTART_INIT;
		} else {
			ap->state = ANEG_STATE_DISABLE_LINK_OK;
		}
		break;

	case ANEG_STATE_RESTART_INIT:
		ap->link_time = ap->cur_time;
		ap->flags &= ~(MR_NP_LOADED);
		ap->txconfig = 0;
		tw32(MAC_TX_AUTO_NEG, 0);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ret = ANEG_TIMER_ENAB;
		ap->state = ANEG_STATE_RESTART;

		fallthrough;
	case ANEG_STATE_RESTART:
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME)
			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
		else
			ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_DISABLE_LINK_OK:
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_ABILITY_DETECT_INIT:
		ap->flags &= ~(MR_TOGGLE_TX);
		ap->txconfig = ANEG_CFG_FD;
		flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
		if (flowctrl & ADVERTISE_1000XPAUSE)
			ap->txconfig |= ANEG_CFG_PS1;
		if (flowctrl & ADVERTISE_1000XPSE_ASYM)
			ap->txconfig |= ANEG_CFG_PS2;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ABILITY_DETECT;
		break;

	case ANEG_STATE_ABILITY_DETECT:
		if (ap->ability_match != 0 && ap->rxconfig != 0)
			ap->state = ANEG_STATE_ACK_DETECT_INIT;
		break;

	case ANEG_STATE_ACK_DETECT_INIT:
		ap->txconfig |= ANEG_CFG_ACK;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ACK_DETECT;

		fallthrough;
	case ANEG_STATE_ACK_DETECT:
		if (ap->ack_match != 0) {
			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
			} else {
				ap->state = ANEG_STATE_AN_ENABLE;
			}
		} else if (ap->ability_match != 0 &&
			   ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
		}
		break;

	case ANEG_STATE_COMPLETE_ACK_INIT:
		if (ap->rxconfig & ANEG_CFG_INVAL) {
			ret = ANEG_FAILED;
			break;
		}
		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
			       MR_LP_ADV_HALF_DUPLEX |
			       MR_LP_ADV_SYM_PAUSE |
			       MR_LP_ADV_ASYM_PAUSE |
			       MR_LP_ADV_REMOTE_FAULT1 |
			       MR_LP_ADV_REMOTE_FAULT2 |
			       MR_LP_ADV_NEXT_PAGE |
			       MR_TOGGLE_RX |
			       MR_NP_RX);
		if (ap->rxconfig & ANEG_CFG_FD)
			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_HD)
			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_PS1)
			ap->flags |= MR_LP_ADV_SYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_PS2)
			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_RF1)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
		if (ap->rxconfig & ANEG_CFG_RF2)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_LP_ADV_NEXT_PAGE;

		ap->link_time = ap->cur_time;

		ap->flags ^= (MR_TOGGLE_TX);
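		/* 0x0008 has no symbolic name in this driver; the code
		 * treats it as the link partner's toggle bit.
		 */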
		if (ap->rxconfig & 0x0008)
			ap->flags |= MR_TOGGLE_RX;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_NP_RX;
		ap->flags |= MR_PAGE_RX;

		ap->state = ANEG_STATE_COMPLETE_ACK;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_COMPLETE_ACK:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
			} else {
				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
				    !(ap->flags & MR_NP_RX)) {
					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
				} else {
					ret = ANEG_FAILED;
				}
			}
		}
		break;

	case ANEG_STATE_IDLE_DETECT_INIT:
		ap->link_time = ap->cur_time;
		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_IDLE_DETECT;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_IDLE_DETECT:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			/* XXX another gem from the Broadcom driver :( */
			ap->state = ANEG_STATE_LINK_OK;
		}
		break;

	case ANEG_STATE_LINK_OK:
		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
		/* ??? unimplemented */
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT:
		/* ??? unimplemented */
		break;

	default:
		ret = ANEG_FAILED;
		break;
	}

	return ret;
}

static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
{
	int res = 0;
	struct tg3_fiber_aneginfo aninfo;
	int status = ANEG_FAILED;
	unsigned int tick;
	u32 tmp;

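	/* Clear the transmitted config word, force the MAC into GMII mode
	 * and start sending config words while the software state machine
	 * runs.
	 */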
	tw32_f(MAC_TX_AUTO_NEG, 0);

	tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
	tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
	udelay(40);

	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
	udelay(40);

	memset(&aninfo, 0, sizeof(aninfo));
	aninfo.flags |= MR_AN_ENABLE;
	aninfo.state = ANEG_STATE_UNKNOWN;
	aninfo.cur_time = 0;
	tick = 0;
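	/* Tick the state machine about once per microsecond, giving
	 * autoneg a budget of roughly 195 ms to finish.
	 */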
	while (++tick < 195000) {
		status = tg3_fiber_aneg_smachine(tp, &aninfo);
		if (status == ANEG_DONE || status == ANEG_FAILED)
			break;

		udelay(1);
	}

	tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	*txflags = aninfo.txconfig;
	*rxflags = aninfo.flags;

	if (status == ANEG_DONE &&
	    (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
			     MR_LP_ADV_FULL_DUPLEX)))
		res = 1;

	return res;
}

static void tg3_init_bcm8002(struct tg3 *tp)
{
	u32 mac_status = tr32(MAC_STATUS);
	int i;

	/* Reset on first-time init, or when we have a link. */
	if (tg3_flag(tp, INIT_COMPLETE) &&
	    !(mac_status & MAC_STATUS_PCS_SYNCED))
		return;

	/* Set PLL lock range. */
	tg3_writephy(tp, 0x16, 0x8007);

	/* SW reset */
	tg3_writephy(tp, MII_BMCR, BMCR_RESET);

	/* Wait for reset to complete. */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 500; i++)
		udelay(10);

	/* Config mode; select PMA/Ch 1 regs. */
	tg3_writephy(tp, 0x10, 0x8411);

	/* Enable auto-lock and comdet, select txclk for tx. */
	tg3_writephy(tp, 0x11, 0x0a10);

	tg3_writephy(tp, 0x18, 0x00a0);
	tg3_writephy(tp, 0x16, 0x41ff);

	/* Assert and deassert POR. */
	tg3_writephy(tp, 0x13, 0x0400);
	udelay(40);
	tg3_writephy(tp, 0x13, 0x0000);

	tg3_writephy(tp, 0x11, 0x0a50);
	udelay(40);
	tg3_writephy(tp, 0x11, 0x0a10);

	/* Wait for signal to stabilize */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 15000; i++)
		udelay(10);

	/* Deselect the channel register so we can read the PHYID
	 * later.
	 */
	tg3_writephy(tp, 0x10, 0x8011);
}

static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
	u16 flowctrl;
	bool current_link_up;
	u32 sg_dig_ctrl, sg_dig_status;
	u32 serdes_cfg, expected_sg_dig_ctrl;
	int workaround, port_a;

	serdes_cfg = 0;
	expected_sg_dig_ctrl = 0;
	workaround = 0;
	port_a = 1;
	current_link_up = false;

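	/* Every revision except 5704 A0/A1 needs the SERDES config
	 * workaround applied below.
	 */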
	if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
		workaround = 1;
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			port_a = 0;

		/* preserve bits 0-11,13,14 for signal pre-emphasis */
		/* preserve bits 20-23 for voltage regulator */
		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
	}

	sg_dig_ctrl = tr32(SG_DIG_CTRL);

	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
		if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
			if (workaround) {
				u32 val = serdes_cfg;

				if (port_a)
					val |= 0xc010000;
				else
					val |= 0x4010000;
				tw32_f(MAC_SERDES_CFG, val);
			}

			tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
		}
		if (mac_status & MAC_STATUS_PCS_SYNCED) {
			tg3_setup_flow_control(tp, 0, 0);
			current_link_up = true;
		}
		goto out;
	}

	/* Want auto-negotiation. */
	expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;

	flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
	if (flowctrl & ADVERTISE_1000XPAUSE)
		expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
	if (flowctrl & ADVERTISE_1000XPSE_ASYM)
		expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;

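	/* If SG_DIG_CTRL does not yet match the wanted configuration,
	 * program it and restart hardware autoneg; otherwise examine the
	 * status of the negotiation already in progress.
	 */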
	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
		if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
		    tp->serdes_counter &&
		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
				    MAC_STATUS_RCVD_CFG)) ==
		     MAC_STATUS_PCS_SYNCED)) {
			tp->serdes_counter--;
			current_link_up = true;
			goto out;
		}
restart_autoneg:
		if (workaround)
			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
		udelay(5);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
				 MAC_STATUS_SIGNAL_DET)) {
		sg_dig_status = tr32(SG_DIG_STATUS);
		mac_status = tr32(MAC_STATUS);

		if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
			u32 local_adv = 0, remote_adv = 0;

			if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
				remote_adv |= LPA_1000XPAUSE;
			if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tp->link_config.rmt_adv =
				mii_adv_to_ethtool_adv_x(remote_adv);

			tg3_setup_flow_control(tp, local_adv, remote_adv);
			current_link_up = true;
			tp->serdes_counter = 0;
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		} else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
			if (tp->serdes_counter)
				tp->serdes_counter--;
			else {
				if (workaround) {
					u32 val = serdes_cfg;

					if (port_a)
						val |= 0xc010000;
					else
						val |= 0x4010000;

					tw32_f(MAC_SERDES_CFG, val);
				}

				tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
				udelay(40);

				/* Link parallel detection - link is up
				 * only if we have PCS_SYNC and not
				 * receiving config code words
				 */
				mac_status = tr32(MAC_STATUS);
				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
					tg3_setup_flow_control(tp, 0, 0);
					current_link_up = true;
					tp->phy_flags |=
						TG3_PHYFLG_PARALLEL_DETECT;
					tp->serdes_counter =
						SERDES_PARALLEL_DET_TIMEOUT;
				} else
					goto restart_autoneg;
			}
		}
	} else {
		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
	}

out:
	return current_link_up;
}

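/* Fallback used when the SG_DIG hardware autoneg block is not in use:
 * run Clause 37 autoneg in software via fiber_autoneg(), or simply
 * force a 1000FD link when autoneg is disabled.
 */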
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5647) static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5648) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5649) bool current_link_up = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5650)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5651) if (!(mac_status & MAC_STATUS_PCS_SYNCED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5652) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5653)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5654) if (tp->link_config.autoneg == AUTONEG_ENABLE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5655) u32 txflags, rxflags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5656) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5657)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5658) if (fiber_autoneg(tp, &txflags, &rxflags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5659) u32 local_adv = 0, remote_adv = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5660)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5661) if (txflags & ANEG_CFG_PS1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5662) local_adv |= ADVERTISE_1000XPAUSE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5663) if (txflags & ANEG_CFG_PS2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5664) local_adv |= ADVERTISE_1000XPSE_ASYM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5665)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5666) if (rxflags & MR_LP_ADV_SYM_PAUSE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5667) remote_adv |= LPA_1000XPAUSE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5668) if (rxflags & MR_LP_ADV_ASYM_PAUSE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5669) remote_adv |= LPA_1000XPAUSE_ASYM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5670)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5671) tp->link_config.rmt_adv =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5672) mii_adv_to_ethtool_adv_x(remote_adv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5673)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5674) tg3_setup_flow_control(tp, local_adv, remote_adv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5675)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5676) current_link_up = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5677) }
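		/* Allow up to ~1.8 ms (30 * 60 us) for the sync/config
		 * change indications to clear.
		 */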
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5678) for (i = 0; i < 30; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5679) udelay(20);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5680) tw32_f(MAC_STATUS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5681) (MAC_STATUS_SYNC_CHANGED |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5682) MAC_STATUS_CFG_CHANGED));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5683) udelay(40);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5684) if ((tr32(MAC_STATUS) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5685) (MAC_STATUS_SYNC_CHANGED |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5686) MAC_STATUS_CFG_CHANGED)) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5687) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5688) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5689)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5690) mac_status = tr32(MAC_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5691) if (!current_link_up &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5692) (mac_status & MAC_STATUS_PCS_SYNCED) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5693) !(mac_status & MAC_STATUS_RCVD_CFG))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5694) current_link_up = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5695) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5696) tg3_setup_flow_control(tp, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5697)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5698) /* Forcing 1000FD link up. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5699) current_link_up = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5700)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5701) tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5702) udelay(40);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5703)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5704) tw32_f(MAC_MODE, tp->mac_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5705) udelay(40);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5706) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5707)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5708) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5709) return current_link_up;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5710) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5711)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5712) static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5713) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5714) u32 orig_pause_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5715) u32 orig_active_speed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5716) u8 orig_active_duplex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5717) u32 mac_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5718) bool current_link_up;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5719) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5720)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5721) orig_pause_cfg = tp->link_config.active_flowctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5722) orig_active_speed = tp->link_config.active_speed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5723) orig_active_duplex = tp->link_config.active_duplex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5724)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5725) if (!tg3_flag(tp, HW_AUTONEG) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5726) tp->link_up &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5727) tg3_flag(tp, INIT_COMPLETE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5728) mac_status = tr32(MAC_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5729) mac_status &= (MAC_STATUS_PCS_SYNCED |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5730) MAC_STATUS_SIGNAL_DET |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5731) MAC_STATUS_CFG_CHANGED |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5732) MAC_STATUS_RCVD_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5733) if (mac_status == (MAC_STATUS_PCS_SYNCED |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5734) MAC_STATUS_SIGNAL_DET)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5735) tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5736) MAC_STATUS_CFG_CHANGED));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5737) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5738) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5739) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5740)
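	/* Renegotiate from scratch: clear the transmit autoneg register
	 * and put the MAC into TBI port mode.
	 */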
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5741) tw32_f(MAC_TX_AUTO_NEG, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5742)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5743) tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5744) tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5745) tw32_f(MAC_MODE, tp->mac_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5746) udelay(40);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5747)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5748) if (tp->phy_id == TG3_PHY_ID_BCM8002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5749) tg3_init_bcm8002(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5750)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5751) /* Enable link change event even when serdes polling. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5752) tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5753) udelay(40);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5754)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5755) current_link_up = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5756) tp->link_config.rmt_adv = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5757) mac_status = tr32(MAC_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5758)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5759) if (tg3_flag(tp, HW_AUTONEG))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5760) current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5761) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5762) current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5763)
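	/* Ack any latched link-change indication in the status block
	 * while keeping it marked as updated.
	 */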
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5764) tp->napi[0].hw_status->status =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5765) (SD_STATUS_UPDATED |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5766) (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5767)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5768) for (i = 0; i < 100; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5769) tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5770) MAC_STATUS_CFG_CHANGED));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5771) udelay(5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5772) if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5773) MAC_STATUS_CFG_CHANGED |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5774) MAC_STATUS_LNKSTATE_CHANGED)) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5775) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5776) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5777)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5778) mac_status = tr32(MAC_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5779) if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5780) current_link_up = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5781) if (tp->link_config.autoneg == AUTONEG_ENABLE &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5782) tp->serdes_counter == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5783) tw32_f(MAC_MODE, (tp->mac_mode |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5784) MAC_MODE_SEND_CONFIGS));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5785) udelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5786) tw32_f(MAC_MODE, tp->mac_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5787) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5788) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5789)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5790) if (current_link_up) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5791) tp->link_config.active_speed = SPEED_1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5792) tp->link_config.active_duplex = DUPLEX_FULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5793) tw32(MAC_LED_CTRL, (tp->led_ctrl |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5794) LED_CTRL_LNKLED_OVERRIDE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5795) LED_CTRL_1000MBPS_ON));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5796) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5797) tp->link_config.active_speed = SPEED_UNKNOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5798) tp->link_config.active_duplex = DUPLEX_UNKNOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5799) tw32(MAC_LED_CTRL, (tp->led_ctrl |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5800) LED_CTRL_LNKLED_OVERRIDE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5801) LED_CTRL_TRAFFIC_OVERRIDE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5802) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5803)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5804) if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5805) u32 now_pause_cfg = tp->link_config.active_flowctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5806) if (orig_pause_cfg != now_pause_cfg ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5807) orig_active_speed != tp->link_config.active_speed ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5808) orig_active_duplex != tp->link_config.active_duplex)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5809) tg3_link_report(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5810) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5812) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5813) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5815) static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5816) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5817) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5818) u32 bmsr, bmcr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5819) u32 current_speed = SPEED_UNKNOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5820) u8 current_duplex = DUPLEX_UNKNOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5821) bool current_link_up = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5822) u32 local_adv, remote_adv, sgsr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5824) if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5825) tg3_asic_rev(tp) == ASIC_REV_5720) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5826) !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5827) (sgsr & SERDES_TG3_SGMII_MODE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5829) if (force_reset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5830) tg3_phy_reset(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5832) tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5834) if (!(sgsr & SERDES_TG3_LINK_UP)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5835) tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5836) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5837) current_link_up = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5838) if (sgsr & SERDES_TG3_SPEED_1000) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5839) current_speed = SPEED_1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5840) tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5841) } else if (sgsr & SERDES_TG3_SPEED_100) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5842) current_speed = SPEED_100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5843) tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5844) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5845) current_speed = SPEED_10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5846) tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5847) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5848)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5849) if (sgsr & SERDES_TG3_FULL_DUPLEX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5850) current_duplex = DUPLEX_FULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5851) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5852) current_duplex = DUPLEX_HALF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5853) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5855) tw32_f(MAC_MODE, tp->mac_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5856) udelay(40);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5858) tg3_clear_mac_status(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5860) goto fiber_setup_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5861) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5863) tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5864) tw32_f(MAC_MODE, tp->mac_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5865) udelay(40);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5867) tg3_clear_mac_status(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5869) if (force_reset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5870) tg3_phy_reset(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5872) tp->link_config.rmt_adv = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5873)
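	/* BMSR_LSTATUS is latched low, so read BMSR twice; the second
	 * read reflects the current link state.
	 */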
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5874) err |= tg3_readphy(tp, MII_BMSR, &bmsr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5875) err |= tg3_readphy(tp, MII_BMSR, &bmsr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5876) if (tg3_asic_rev(tp) == ASIC_REV_5714) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5877) if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5878) bmsr |= BMSR_LSTATUS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5879) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5880) bmsr &= ~BMSR_LSTATUS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5881) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5883) err |= tg3_readphy(tp, MII_BMCR, &bmcr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5885) if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5886) (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5887) /* do nothing, just check for link up at the end */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5888) } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5889) u32 adv, newadv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5891) err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5892) newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5893) ADVERTISE_1000XPAUSE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5894) ADVERTISE_1000XPSE_ASYM |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5895) ADVERTISE_SLCT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5897) newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5898) newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5899)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5900) if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5901) tg3_writephy(tp, MII_ADVERTISE, newadv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5902) bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5903) tg3_writephy(tp, MII_BMCR, bmcr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5905) tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5906) tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5907) tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5909) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5910) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5911) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5912) u32 new_bmcr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5914) bmcr &= ~BMCR_SPEED1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5915) new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5917) if (tp->link_config.duplex == DUPLEX_FULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5918) new_bmcr |= BMCR_FULLDPLX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5920) if (new_bmcr != bmcr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5921) /* BMCR_SPEED1000 is a reserved bit that needs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5922) * to be set on write.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5923) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5924) new_bmcr |= BMCR_SPEED1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5925)
			/* Force a link down */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5927) if (tp->link_up) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5928) u32 adv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5930) err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5931) adv &= ~(ADVERTISE_1000XFULL |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5932) ADVERTISE_1000XHALF |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5933) ADVERTISE_SLCT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5934) tg3_writephy(tp, MII_ADVERTISE, adv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5935) tg3_writephy(tp, MII_BMCR, bmcr |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5936) BMCR_ANRESTART |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5937) BMCR_ANENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5938) udelay(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5939) tg3_carrier_off(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5940) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5941) tg3_writephy(tp, MII_BMCR, new_bmcr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5942) bmcr = new_bmcr;
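			/* Flush the latched link-down indication after
			 * rewriting BMCR (BMSR_LSTATUS latches low).
			 */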
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5943) err |= tg3_readphy(tp, MII_BMSR, &bmsr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5944) err |= tg3_readphy(tp, MII_BMSR, &bmsr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5945) if (tg3_asic_rev(tp) == ASIC_REV_5714) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5946) if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5947) bmsr |= BMSR_LSTATUS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5948) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5949) bmsr &= ~BMSR_LSTATUS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5950) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5951) tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5952) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5953) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5955) if (bmsr & BMSR_LSTATUS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5956) current_speed = SPEED_1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5957) current_link_up = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5958) if (bmcr & BMCR_FULLDPLX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5959) current_duplex = DUPLEX_FULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5960) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5961) current_duplex = DUPLEX_HALF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5963) local_adv = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5964) remote_adv = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5966) if (bmcr & BMCR_ANENABLE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5967) u32 common;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5969) err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5970) err |= tg3_readphy(tp, MII_LPA, &remote_adv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5971) common = local_adv & remote_adv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5972) if (common & (ADVERTISE_1000XHALF |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5973) ADVERTISE_1000XFULL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5974) if (common & ADVERTISE_1000XFULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5975) current_duplex = DUPLEX_FULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5976) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5977) current_duplex = DUPLEX_HALF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5978)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5979) tp->link_config.rmt_adv =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5980) mii_adv_to_ethtool_adv_x(remote_adv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5981) } else if (!tg3_flag(tp, 5780_CLASS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5982) /* Link is up via parallel detect */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5983) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5984) current_link_up = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5985) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5986) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5987) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5988)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5989) fiber_setup_done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5990) if (current_link_up && current_duplex == DUPLEX_FULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5991) tg3_setup_flow_control(tp, local_adv, remote_adv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5993) tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5994) if (tp->link_config.active_duplex == DUPLEX_HALF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5995) tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5997) tw32_f(MAC_MODE, tp->mac_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5998) udelay(40);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6000) tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6001)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6002) tp->link_config.active_speed = current_speed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6003) tp->link_config.active_duplex = current_duplex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6005) tg3_test_and_report_link_chg(tp, current_link_up);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6006) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6007) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6009) static void tg3_serdes_parallel_detect(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6010) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6011) if (tp->serdes_counter) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6012) /* Give autoneg time to complete. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6013) tp->serdes_counter--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6014) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6015) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6017) if (!tp->link_up &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6018) (tp->link_config.autoneg == AUTONEG_ENABLE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6019) u32 bmcr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6021) tg3_readphy(tp, MII_BMCR, &bmcr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6022) if (bmcr & BMCR_ANENABLE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6023) u32 phy1, phy2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6025) /* Select shadow register 0x1f */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6026) tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6027) tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6028)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6029) /* Select expansion interrupt status register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6030) tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6031) MII_TG3_DSP_EXP1_INT_STAT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6032) tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6033) tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6034)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6035) if ((phy1 & 0x10) && !(phy2 & 0x20)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6036) /* We have signal detect and not receiving
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6037) * config code words, link is up by parallel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6038) * detection.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6039) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6040)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6041) bmcr &= ~BMCR_ANENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6042) bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6043) tg3_writephy(tp, MII_BMCR, bmcr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6044) tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6045) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6046) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6047) } else if (tp->link_up &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6048) (tp->link_config.autoneg == AUTONEG_ENABLE) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6049) (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6050) u32 phy2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6051)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6052) /* Select expansion interrupt status register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6053) tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6054) MII_TG3_DSP_EXP1_INT_STAT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6055) tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6056) if (phy2 & 0x20) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6057) u32 bmcr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6058)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6059) /* Config code words received, turn on autoneg. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6060) tg3_readphy(tp, MII_BMCR, &bmcr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6061) tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6062)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6063) tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6065) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6066) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6067) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6068)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6069) static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6070) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6071) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6072) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6073)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6074) if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6075) err = tg3_setup_fiber_phy(tp, force_reset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6076) else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6077) err = tg3_setup_fiber_mii_phy(tp, force_reset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6078) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6079) err = tg3_setup_copper_phy(tp, force_reset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6080)
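	/* On 5784 AX chips, keep the GRC timer prescaler in step with
	 * the current MAC core clock selection.
	 */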
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6081) if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6082) u32 scale;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6083)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6084) val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6085) if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6086) scale = 65;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6087) else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6088) scale = 6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6089) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6090) scale = 12;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6091)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6092) val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6093) val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6094) tw32(GRC_MISC_CFG, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6095) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6096)
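	/* Rebuild the TX lengths register.  Half-duplex gigabit uses an
	 * expanded slot time (carrier extension); all other link modes
	 * use the standard slot time.
	 */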
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6097) val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6098) (6 << TX_LENGTHS_IPG_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6099) if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6100) tg3_asic_rev(tp) == ASIC_REV_5762)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6101) val |= tr32(MAC_TX_LENGTHS) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6102) (TX_LENGTHS_JMB_FRM_LEN_MSK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6103) TX_LENGTHS_CNT_DWN_VAL_MSK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6105) if (tp->link_config.active_speed == SPEED_1000 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6106) tp->link_config.active_duplex == DUPLEX_HALF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6107) tw32(MAC_TX_LENGTHS, val |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6108) (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6109) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6110) tw32(MAC_TX_LENGTHS, val |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6111) (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6113) if (!tg3_flag(tp, 5705_PLUS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6114) if (tp->link_up) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6115) tw32(HOSTCC_STAT_COAL_TICKS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6116) tp->coal.stats_block_coalesce_usecs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6117) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6118) tw32(HOSTCC_STAT_COAL_TICKS, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6119) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6120) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6121)
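	/* ASPM workaround: adjust the PCIe L1 entry threshold with
	 * link state.
	 */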
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6122) if (tg3_flag(tp, ASPM_WORKAROUND)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6123) val = tr32(PCIE_PWR_MGMT_THRESH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6124) if (!tp->link_up)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6125) val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6126) tp->pwrmgmt_thresh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6127) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6128) val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6129) tw32(PCIE_PWR_MGMT_THRESH, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6130) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6132) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6133) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6135) /* tp->lock must be held */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6136) static u64 tg3_refclk_read(struct tg3 *tp, struct ptp_system_timestamp *sts)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6137) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6138) u64 stamp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6140) ptp_read_system_prets(sts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6141) stamp = tr32(TG3_EAV_REF_CLCK_LSB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6142) ptp_read_system_postts(sts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6143) stamp |= (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6145) return stamp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6146) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6147)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6148) /* tp->lock must be held */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6149) static void tg3_refclk_write(struct tg3 *tp, u64 newval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6150) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6151) u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
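	/* Stop the reference clock, load the new 64-bit value, then
	 * resume counting.
	 */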
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6153) tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6154) tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6155) tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6156) tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6157) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6159) static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
static inline void tg3_full_unlock(struct tg3 *tp);

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6161) static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6162) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6163) struct tg3 *tp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6165) info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6166) SOF_TIMESTAMPING_RX_SOFTWARE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6167) SOF_TIMESTAMPING_SOFTWARE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6169) if (tg3_flag(tp, PTP_CAPABLE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6170) info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6171) SOF_TIMESTAMPING_RX_HARDWARE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6172) SOF_TIMESTAMPING_RAW_HARDWARE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6173) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6175) if (tp->ptp_clock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6176) info->phc_index = ptp_clock_index(tp->ptp_clock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6177) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6178) info->phc_index = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6180) info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6182) info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6183) (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6184) (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6185) (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6186) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6187) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6189) static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6190) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6191) struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6192) bool neg_adj = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6193) u32 correction = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6195) if (ppb < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6196) neg_adj = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6197) ppb = -ppb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6198) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6199)
	/* Frequency adjustment is performed using hardware with a 24-bit
	 * accumulator and a programmable correction value.  On each clock
	 * cycle the correction value is added to the accumulator, and when
	 * the accumulator overflows the time counter is incremented or
	 * decremented.
	 *
	 * So the conversion from ppb to the correction value is
	 * ppb * (1 << 24) / 1000000000
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6208) correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6209) TG3_EAV_REF_CLK_CORRECT_MASK;
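	/* For example, ppb = 100000 (100 ppm) yields a correction of
	 * 100000 * (1 << 24) / 1000000000 = 1677 (0x68d).
	 */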
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6210)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6211) tg3_full_lock(tp, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6213) if (correction)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6214) tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6215) TG3_EAV_REF_CLK_CORRECT_EN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6216) (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6217) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6218) tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6220) tg3_full_unlock(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6222) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6223) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6224)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6225) static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6226) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6227) struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6228)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6229) tg3_full_lock(tp, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6230) tp->ptp_adjust += delta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6231) tg3_full_unlock(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6232)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6233) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6234) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6235)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6236) static int tg3_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6237) struct ptp_system_timestamp *sts)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6238) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6239) u64 ns;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6240) struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6241)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6242) tg3_full_lock(tp, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6243) ns = tg3_refclk_read(tp, sts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6244) ns += tp->ptp_adjust;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6245) tg3_full_unlock(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6247) *ts = ns_to_timespec64(ns);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6249) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6250) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6251)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6252) static int tg3_ptp_settime(struct ptp_clock_info *ptp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6253) const struct timespec64 *ts)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6254) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6255) u64 ns;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6256) struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6258) ns = timespec64_to_ns(ts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6259)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6260) tg3_full_lock(tp, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6261) tg3_refclk_write(tp, ns);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6262) tp->ptp_adjust = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6263) tg3_full_unlock(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6264)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6265) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6266) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6267)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6268) static int tg3_ptp_enable(struct ptp_clock_info *ptp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6269) struct ptp_clock_request *rq, int on)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6270) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6271) struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6272) u32 clock_ctl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6273) int rval = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6275) switch (rq->type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6276) case PTP_CLK_REQ_PEROUT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6277) /* Reject requests with unsupported flags */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6278) if (rq->perout.flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6279) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6281) if (rq->perout.index != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6282) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6283)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6284) tg3_full_lock(tp, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6285) clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6286) clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6288) if (on) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6289) u64 nsec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6291) nsec = rq->perout.start.sec * 1000000000ULL +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6292) rq->perout.start.nsec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6294) if (rq->perout.period.sec || rq->perout.period.nsec) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6295) netdev_warn(tp->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6296) "Device supports only a one-shot timesync output, period must be 0\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6297) rval = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6298) goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6299) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6301) if (nsec & (1ULL << 63)) {
				netdev_warn(tp->dev,
					    "Start value (nsec) is over the limit; start may be at most 63 bits\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6304) rval = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6305) goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6306) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6307)
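			/* Arm the one-shot output: load the 63-bit
			 * target time into WATCHDOG0 and route it to
			 * the timesync output.
			 */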
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6308) tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6309) tw32(TG3_EAV_WATCHDOG0_MSB,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6310) TG3_EAV_WATCHDOG0_EN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6311) ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6312)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6313) tw32(TG3_EAV_REF_CLCK_CTL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6314) clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6315) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6316) tw32(TG3_EAV_WATCHDOG0_MSB, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6317) tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6318) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6319)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6320) err_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6321) tg3_full_unlock(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6322) return rval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6323)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6324) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6325) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6326) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6327)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6328) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6329) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6331) static const struct ptp_clock_info tg3_ptp_caps = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6332) .owner = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6333) .name = "tg3 clock",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6334) .max_adj = 250000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6335) .n_alarm = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6336) .n_ext_ts = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6337) .n_per_out = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6338) .n_pins = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6339) .pps = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6340) .adjfreq = tg3_ptp_adjfreq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6341) .adjtime = tg3_ptp_adjtime,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6342) .gettimex64 = tg3_ptp_gettimex,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6343) .settime64 = tg3_ptp_settime,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6344) .enable = tg3_ptp_enable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6345) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6346)
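/* Convert a raw hardware clock sample into a socket timestamp, masking
 * to the valid timestamp bits and applying the current software
 * adjustment.
 */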
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6347) static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6348) struct skb_shared_hwtstamps *timestamp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6349) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6350) memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6351) timestamp->hwtstamp = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6352) tp->ptp_adjust);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6353) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6354)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6355) /* tp->lock must be held */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6356) static void tg3_ptp_init(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6357) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6358) if (!tg3_flag(tp, PTP_CAPABLE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6359) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6360)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6361) /* Initialize the hardware clock to the system time. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6362) tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6363) tp->ptp_adjust = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6364) tp->ptp_info = tg3_ptp_caps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6365) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6366)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6367) /* tp->lock must be held */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6368) static void tg3_ptp_resume(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6369) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6370) if (!tg3_flag(tp, PTP_CAPABLE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6371) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6372)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6373) tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6374) tp->ptp_adjust = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6375) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6377) static void tg3_ptp_fini(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6378) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6379) if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6380) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6381)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6382) ptp_clock_unregister(tp->ptp_clock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6383) tp->ptp_clock = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6384) tp->ptp_adjust = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6385) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6386)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6387) static inline int tg3_irq_sync(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6388) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6389) return tp->irq_sync;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6390) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6391)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6392) static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6393) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6394) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6395)
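	/* Offset the destination so each value lands at its own
	 * register offset within the dump buffer.
	 */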
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6396) dst = (u32 *)((u8 *)dst + off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6397) for (i = 0; i < len; i += sizeof(u32))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6398) *dst++ = tr32(off + i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6399) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6401) static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6402) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6403) tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6404) tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6405) tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6406) tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6407) tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6408) tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6409) tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6410) tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6411) tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6412) tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6413) tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6414) tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6415) tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6416) tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6417) tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6418) tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6419) tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6420) tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6421) tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6423) if (tg3_flag(tp, SUPPORT_MSIX))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6424) tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6425)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6426) tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6427) tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6428) tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6429) tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6430) tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6431) tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6432) tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6433) tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6434)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6435) if (!tg3_flag(tp, 5705_PLUS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6436) tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6437) tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6438) tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6439) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6440)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6441) tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6442) tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6443) tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6444) tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6445) tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6446)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6447) if (tg3_flag(tp, NVRAM))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6448) tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6449) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6450)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6451) static void tg3_dump_state(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6452) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6453) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6454) u32 *regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6455)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6456) regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6457) if (!regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6458) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6459)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6460) if (tg3_flag(tp, PCI_EXPRESS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6461) /* Read up to but not including private PCI registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6462) for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6463) regs[i / sizeof(u32)] = tr32(i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6464) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6465) tg3_dump_legacy_regs(tp, regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6466)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6467) for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6468) if (!regs[i + 0] && !regs[i + 1] &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6469) !regs[i + 2] && !regs[i + 3])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6470) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6471)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6472) netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6473) i * 4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6474) regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6475) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6476)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6477) kfree(regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6478)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6479) for (i = 0; i < tp->irq_cnt; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6480) struct tg3_napi *tnapi = &tp->napi[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6481)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6482) /* SW status block */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6483) netdev_err(tp->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6484) "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6485) i,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6486) tnapi->hw_status->status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6487) tnapi->hw_status->status_tag,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6488) tnapi->hw_status->rx_jumbo_consumer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6489) tnapi->hw_status->rx_consumer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6490) tnapi->hw_status->rx_mini_consumer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6491) tnapi->hw_status->idx[0].rx_producer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6492) tnapi->hw_status->idx[0].tx_consumer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6493)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6494) netdev_err(tp->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6495) "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6496) i,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6497) tnapi->last_tag, tnapi->last_irq_tag,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6498) tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6499) tnapi->rx_rcb_ptr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6500) tnapi->prodring.rx_std_prod_idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6501) tnapi->prodring.rx_std_cons_idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6502) tnapi->prodring.rx_jmb_prod_idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6503) tnapi->prodring.rx_jmb_cons_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6504) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6505) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6506)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6507) /* This is called whenever we suspect that the system chipset is re-
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6508) * ordering the sequence of MMIO to the tx send mailbox. The symptom
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6509) * is bogus tx completions. We try to recover by setting the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6510) * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6511) * in the workqueue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6512) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6513) static void tg3_tx_recover(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6514) {
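	/* It is a bug to get here while the mailbox write-reorder
	 * workaround (or indirect mailbox writes) is already in effect.
	 */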
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6515) BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6516) tp->write32_tx_mbox == tg3_write_indirect_mbox);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6517)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6518) netdev_warn(tp->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6519) "The system may be re-ordering memory-mapped I/O "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6520) "cycles to the network device, attempting to recover. "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6521) "Please report the problem to the driver maintainer "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6522) "and include system chipset information.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6523)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6524) tg3_flag_set(tp, TX_RECOVERY_PENDING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6525) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6526)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6527) static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6528) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6529) /* Tell compiler to fetch tx indices from memory. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6530) barrier();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6531) return tnapi->tx_pending -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6532) ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6533) }
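
/* Worked example for the arithmetic above (hypothetical state, assuming
 * the usual TG3_TX_RING_SIZE of 512): with tx_pending == 511,
 * tx_prod == 5 and tx_cons == 510 the producer has wrapped, so
 * (5 - 510) & 511 == 7 descriptors are in flight and 511 - 7 == 504
 * are still available.
 */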
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6535) /* Tigon3 never reports partial packet sends. So we do not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6536) * need special logic to handle SKBs that have not had all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6537) * of their frags sent yet, like SunGEM does.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6538) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6539) static void tg3_tx(struct tg3_napi *tnapi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6540) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6541) struct tg3 *tp = tnapi->tp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6542) u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6543) u32 sw_idx = tnapi->tx_cons;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6544) struct netdev_queue *txq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6545) int index = tnapi - tp->napi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6546) unsigned int pkts_compl = 0, bytes_compl = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6547)
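	/* With TSS the first interrupt vector carries no TX ring, so
	 * tnapi N services netdev TX queue N - 1.
	 */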
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6548) if (tg3_flag(tp, ENABLE_TSS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6549) index--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6550)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6551) txq = netdev_get_tx_queue(tp->dev, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6552)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6553) while (sw_idx != hw_idx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6554) struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6555) struct sk_buff *skb = ri->skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6556) int i, tx_bug = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6557)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6558) if (unlikely(skb == NULL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6559) tg3_tx_recover(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6560) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6561) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6562)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6563) if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6564) struct skb_shared_hwtstamps timestamp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6565) u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6566) hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6567)
			tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);

			skb_tstamp_tx(skb, &timestamp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6571) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6572)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6573) pci_unmap_single(tp->pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6574) dma_unmap_addr(ri, mapping),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6575) skb_headlen(skb),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6576) PCI_DMA_TODEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6577)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6578) ri->skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6579)
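		/* A mapping that had to be split to satisfy DMA
		 * restrictions occupies additional descriptors marked
		 * ->fragmented; step over them.
		 */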
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6580) while (ri->fragmented) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6581) ri->fragmented = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6582) sw_idx = NEXT_TX(sw_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6583) ri = &tnapi->tx_buffers[sw_idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6584) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6585)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6586) sw_idx = NEXT_TX(sw_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6587)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6588) for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6589) ri = &tnapi->tx_buffers[sw_idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6590) if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6591) tx_bug = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6592)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6593) pci_unmap_page(tp->pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6594) dma_unmap_addr(ri, mapping),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6595) skb_frag_size(&skb_shinfo(skb)->frags[i]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6596) PCI_DMA_TODEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6597)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6598) while (ri->fragmented) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6599) ri->fragmented = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6600) sw_idx = NEXT_TX(sw_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6601) ri = &tnapi->tx_buffers[sw_idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6602) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6603)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6604) sw_idx = NEXT_TX(sw_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6605) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6606)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6607) pkts_compl++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6608) bytes_compl += skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6609)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6610) dev_consume_skb_any(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6611)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6612) if (unlikely(tx_bug)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6613) tg3_tx_recover(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6614) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6615) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6616) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6617)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6618) netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6619)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6620) tnapi->tx_cons = sw_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6621)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6622) /* Need to make the tx_cons update visible to tg3_start_xmit()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6623) * before checking for netif_queue_stopped(). Without the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6624) * memory barrier, there is a small possibility that tg3_start_xmit()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6625) * will miss it and cause the queue to be stopped forever.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6626) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6627) smp_mb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6628)
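	/* Re-check under the TX queue lock: only the locked test is
	 * authoritative, since tg3_start_xmit() may stop the queue
	 * between the lock-free check and the wake.
	 */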
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6629) if (unlikely(netif_tx_queue_stopped(txq) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6630) (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6631) __netif_tx_lock(txq, smp_processor_id());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6632) if (netif_tx_queue_stopped(txq) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6633) (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6634) netif_tx_wake_queue(txq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6635) __netif_tx_unlock(txq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6636) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6637) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6638)
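/* Free an RX buffer allocated by tg3_alloc_rx_data(): page-fragment
 * allocations go back to the frag allocator, kmalloc()ed (jumbo)
 * buffers to kfree().
 */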
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6639) static void tg3_frag_free(bool is_frag, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6640) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6641) if (is_frag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6642) skb_free_frag(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6643) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6644) kfree(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6645) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6646)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6647) static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6648) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6649) unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6650) SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6651)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6652) if (!ri->data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6653) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6654)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6655) pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6656) map_sz, PCI_DMA_FROMDEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6657) tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6658) ri->data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6659) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6660)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6662) /* Returns size of skb allocated or < 0 on error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6663) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6664) * We only need to fill in the address because the other members
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6665) * of the RX descriptor are invariant, see tg3_init_rings.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6666) *
 * Note the purposeful asymmetry of cpu vs. chip accesses. For
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6668) * posting buffers we only dirty the first cache line of the RX
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6669) * descriptor (containing the address). Whereas for the RX status
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6670) * buffers the cpu only reads the last cacheline of the RX descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6671) * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6672) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6673) static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6674) u32 opaque_key, u32 dest_idx_unmasked,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6675) unsigned int *frag_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6676) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6677) struct tg3_rx_buffer_desc *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6678) struct ring_info *map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6679) u8 *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6680) dma_addr_t mapping;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6681) int skb_size, data_size, dest_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6682)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6683) switch (opaque_key) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6684) case RXD_OPAQUE_RING_STD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6685) dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6686) desc = &tpr->rx_std[dest_idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6687) map = &tpr->rx_std_buffers[dest_idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6688) data_size = tp->rx_pkt_map_sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6689) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6690)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6691) case RXD_OPAQUE_RING_JUMBO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6692) dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6693) desc = &tpr->rx_jmb[dest_idx].std;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6694) map = &tpr->rx_jmb_buffers[dest_idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6695) data_size = TG3_RX_JMB_MAP_SZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6696) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6697)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6698) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6699) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6700) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6701)
	/* Do not overwrite any of the map or ring producer information
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6703) * until we are sure we can commit to a new buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6704) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6705) * Callers depend upon this behavior and assume that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6706) * we leave everything unchanged if we fail.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6707) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6708) skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6709) SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6710) if (skb_size <= PAGE_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6711) data = napi_alloc_frag(skb_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6712) *frag_size = skb_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6713) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6714) data = kmalloc(skb_size, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6715) *frag_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6716) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6717) if (!data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6718) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6719)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6720) mapping = pci_map_single(tp->pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6721) data + TG3_RX_OFFSET(tp),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6722) data_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6723) PCI_DMA_FROMDEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6724) if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6725) tg3_frag_free(skb_size <= PAGE_SIZE, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6726) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6727) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6728)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6729) map->data = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6730) dma_unmap_addr_set(map, mapping, mapping);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6731)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6732) desc->addr_hi = ((u64)mapping >> 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6733) desc->addr_lo = ((u64)mapping & 0xffffffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6734)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6735) return data_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6736) }
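
/* A sketch of the sizing rule above (hypothetical numbers): a standard
 * 1500-MTU buffer plus TG3_RX_OFFSET() padding and the skb_shared_info
 * footer fits within one page, so it comes from napi_alloc_frag(); a
 * ~9000 byte jumbo buffer exceeds PAGE_SIZE and falls back to
 * kmalloc(GFP_ATOMIC), with *frag_size left at zero so that the buffer
 * is later freed with kfree() instead of the frag allocator.
 */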
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6737)
/* We only need to copy the address over because the other
 * members of the RX descriptor are invariant. See the notes above
 * tg3_alloc_rx_data() for full details.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6741) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6742) static void tg3_recycle_rx(struct tg3_napi *tnapi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6743) struct tg3_rx_prodring_set *dpr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6744) u32 opaque_key, int src_idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6745) u32 dest_idx_unmasked)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6746) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6747) struct tg3 *tp = tnapi->tp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6748) struct tg3_rx_buffer_desc *src_desc, *dest_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6749) struct ring_info *src_map, *dest_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6750) struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6751) int dest_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6752)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6753) switch (opaque_key) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6754) case RXD_OPAQUE_RING_STD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6755) dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6756) dest_desc = &dpr->rx_std[dest_idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6757) dest_map = &dpr->rx_std_buffers[dest_idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6758) src_desc = &spr->rx_std[src_idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6759) src_map = &spr->rx_std_buffers[src_idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6760) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6761)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6762) case RXD_OPAQUE_RING_JUMBO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6763) dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6764) dest_desc = &dpr->rx_jmb[dest_idx].std;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6765) dest_map = &dpr->rx_jmb_buffers[dest_idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6766) src_desc = &spr->rx_jmb[src_idx].std;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6767) src_map = &spr->rx_jmb_buffers[src_idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6768) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6769)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6770) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6771) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6772) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6773)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6774) dest_map->data = src_map->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6775) dma_unmap_addr_set(dest_map, mapping,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6776) dma_unmap_addr(src_map, mapping));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6777) dest_desc->addr_hi = src_desc->addr_hi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6778) dest_desc->addr_lo = src_desc->addr_lo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6779)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6780) /* Ensure that the update to the skb happens after the physical
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6781) * addresses have been transferred to the new BD location.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6782) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6783) smp_wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6784)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6785) src_map->data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6786) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6788) /* The RX ring scheme is composed of multiple rings which post fresh
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6789) * buffers to the chip, and one special ring the chip uses to report
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6790) * status back to the host.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6791) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6792) * The special ring reports the status of received packets to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6793) * host. The chip does not write into the original descriptor the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6794) * RX buffer was obtained from. The chip simply takes the original
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6795) * descriptor as provided by the host, updates the status and length
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6796) * field, then writes this into the next status ring entry.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6797) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6798) * Each ring the host uses to post buffers to the chip is described
 * by a TG3_BDINFO entry in the chip's SRAM area. When a packet arrives,
 * it is first placed into on-chip RAM. When the packet's length is
 * known, the chip walks down the TG3_BDINFO entries to select a ring.
 * Each TG3_BDINFO specifies a MAXLEN field, and the first TG3_BDINFO
 * whose MAXLEN covers the new packet's length is chosen.
 *
 * The "separate ring for rx status" scheme may sound odd, but it makes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6806) * sense from a cache coherency perspective. If only the host writes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6807) * to the buffer post rings, and only the chip writes to the rx status
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6808) * rings, then cache lines never move beyond shared-modified state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6809) * If both the host and chip were to write into the same ring, cache line
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6810) * eviction could occur since both entities want it in an exclusive state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6811) */
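
/* Rough data flow implied by the description above:
 *
 *	host: std/jumbo producer rings --(buffer addresses)--> chip
 *	chip: return (status) ring --(len/flags + opaque cookie)--> host
 *
 * Only the host writes the producer rings and only the chip writes the
 * return ring, which is what keeps cache lines from ping-ponging.
 */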
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6812) static int tg3_rx(struct tg3_napi *tnapi, int budget)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6813) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6814) struct tg3 *tp = tnapi->tp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6815) u32 work_mask, rx_std_posted = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6816) u32 std_prod_idx, jmb_prod_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6817) u32 sw_idx = tnapi->rx_rcb_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6818) u16 hw_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6819) int received;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6820) struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6821)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6822) hw_idx = *(tnapi->rx_rcb_prod_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6823) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6824) * We need to order the read of hw_idx and the read of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6825) * the opaque cookie.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6826) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6827) rmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6828) work_mask = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6829) received = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6830) std_prod_idx = tpr->rx_std_prod_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6831) jmb_prod_idx = tpr->rx_jmb_prod_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6832) while (sw_idx != hw_idx && budget > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6833) struct ring_info *ri;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6834) struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6835) unsigned int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6836) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6837) dma_addr_t dma_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6838) u32 opaque_key, desc_idx, *post_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6839) u8 *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6840) u64 tstamp = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6842) desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6843) opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6844) if (opaque_key == RXD_OPAQUE_RING_STD) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6845) ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6846) dma_addr = dma_unmap_addr(ri, mapping);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6847) data = ri->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6848) post_ptr = &std_prod_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6849) rx_std_posted++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6850) } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6851) ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6852) dma_addr = dma_unmap_addr(ri, mapping);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6853) data = ri->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6854) post_ptr = &jmb_prod_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6855) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6856) goto next_pkt_nopost;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6858) work_mask |= opaque_key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6860) if (desc->err_vlan & RXD_ERR_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6861) drop_it:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6862) tg3_recycle_rx(tnapi, tpr, opaque_key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6863) desc_idx, *post_ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6864) drop_it_no_recycle:
			/* Other statistics are maintained by the card itself. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6866) tp->rx_dropped++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6867) goto next_pkt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6868) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6869)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6870) prefetch(data + TG3_RX_OFFSET(tp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6871) len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6872) ETH_FCS_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6873)
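		/* For PTP frames the chip latches the arrival time in the
		 * RX timestamp registers; pick it up here and attach it to
		 * the skb once it has been built.
		 */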
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6874) if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6875) RXD_FLAG_PTPSTAT_PTPV1 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6876) (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6877) RXD_FLAG_PTPSTAT_PTPV2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6878) tstamp = tr32(TG3_RX_TSTAMP_LSB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6879) tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6880) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6881)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6882) if (len > TG3_RX_COPY_THRESH(tp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6883) int skb_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6884) unsigned int frag_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6886) skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6887) *post_ptr, &frag_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6888) if (skb_size < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6889) goto drop_it;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6891) pci_unmap_single(tp->pdev, dma_addr, skb_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6892) PCI_DMA_FROMDEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6894) /* Ensure that the update to the data happens
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6895) * after the usage of the old DMA mapping.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6896) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6897) smp_wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6899) ri->data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6901) skb = build_skb(data, frag_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6902) if (!skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6903) tg3_frag_free(frag_size != 0, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6904) goto drop_it_no_recycle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6905) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6906) skb_reserve(skb, TG3_RX_OFFSET(tp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6907) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6908) tg3_recycle_rx(tnapi, tpr, opaque_key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6909) desc_idx, *post_ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6911) skb = netdev_alloc_skb(tp->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6912) len + TG3_RAW_IP_ALIGN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6913) if (skb == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6914) goto drop_it_no_recycle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6916) skb_reserve(skb, TG3_RAW_IP_ALIGN);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len,
						    PCI_DMA_FROMDEVICE);
			memcpy(skb->data, data + TG3_RX_OFFSET(tp), len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len,
						       PCI_DMA_FROMDEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6922) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6923)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6924) skb_put(skb, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6925) if (tstamp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6926) tg3_hwclock_to_timestamp(tp, tstamp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6927) skb_hwtstamps(skb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6929) if ((tp->dev->features & NETIF_F_RXCSUM) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6930) (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6931) (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6932) >> RXD_TCPCSUM_SHIFT) == 0xffff))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6933) skb->ip_summed = CHECKSUM_UNNECESSARY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6934) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6935) skb_checksum_none_assert(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6936)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6937) skb->protocol = eth_type_trans(skb, tp->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6938)
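		/* VLAN-tagged frames may legitimately exceed
		 * mtu + ETH_HLEN by the tag length, so 802.1Q/802.1AD
		 * frames are exempt from the oversize drop below.
		 */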
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6939) if (len > (tp->dev->mtu + ETH_HLEN) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6940) skb->protocol != htons(ETH_P_8021Q) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6941) skb->protocol != htons(ETH_P_8021AD)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6942) dev_kfree_skb_any(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6943) goto drop_it_no_recycle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6944) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6945)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6946) if (desc->type_flags & RXD_FLAG_VLAN &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6947) !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6948) __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6949) desc->err_vlan & RXD_VLAN_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6951) napi_gro_receive(&tnapi->napi, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6953) received++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6954) budget--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6955)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6956) next_pkt:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6957) (*post_ptr)++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6959) if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6960) tpr->rx_std_prod_idx = std_prod_idx &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6961) tp->rx_std_ring_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6962) tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6963) tpr->rx_std_prod_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6964) work_mask &= ~RXD_OPAQUE_RING_STD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6965) rx_std_posted = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6966) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6967) next_pkt_nopost:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6968) sw_idx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6969) sw_idx &= tp->rx_ret_ring_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6971) /* Refresh hw_idx to see if there is new work */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6972) if (sw_idx == hw_idx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6973) hw_idx = *(tnapi->rx_rcb_prod_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6974) rmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6975) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6976) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6978) /* ACK the status ring. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6979) tnapi->rx_rcb_ptr = sw_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6980) tw32_rx_mbox(tnapi->consmbox, sw_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6982) /* Refill RX ring(s). */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6983) if (!tg3_flag(tp, ENABLE_RSS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6984) /* Sync BD data before updating mailbox */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6985) wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6987) if (work_mask & RXD_OPAQUE_RING_STD) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6988) tpr->rx_std_prod_idx = std_prod_idx &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6989) tp->rx_std_ring_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6990) tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6991) tpr->rx_std_prod_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6992) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6993) if (work_mask & RXD_OPAQUE_RING_JUMBO) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6994) tpr->rx_jmb_prod_idx = jmb_prod_idx &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6995) tp->rx_jmb_ring_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6996) tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6997) tpr->rx_jmb_prod_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6998) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6999) } else if (work_mask) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7000) /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7001) * updated before the producer indices can be updated.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7002) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7003) smp_wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7005) tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7006) tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7007)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7008) if (tnapi != &tp->napi[1]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7009) tp->rx_refill = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7010) napi_schedule(&tp->napi[1].napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7011) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7012) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7013)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7014) return received;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7015) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7017) static void tg3_poll_link(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7018) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7019) /* handle link change and other phy events */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7020) if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7021) struct tg3_hw_status *sblk = tp->napi[0].hw_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7022)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7023) if (sblk->status & SD_STATUS_LINK_CHG) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7024) sblk->status = SD_STATUS_UPDATED |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7025) (sblk->status & ~SD_STATUS_LINK_CHG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7026) spin_lock(&tp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7027) if (tg3_flag(tp, USE_PHYLIB)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7028) tw32_f(MAC_STATUS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7029) (MAC_STATUS_SYNC_CHANGED |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7030) MAC_STATUS_CFG_CHANGED |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7031) MAC_STATUS_MI_COMPLETION |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7032) MAC_STATUS_LNKSTATE_CHANGED));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7033) udelay(40);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7034) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7035) tg3_setup_phy(tp, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7036) spin_unlock(&tp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7037) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7038) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7039) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7040)
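/* With RSS, each RX NAPI context recycles buffers into its own shadow
 * producer ring. This helper moves them from a source ring (spr) back
 * into the master ring (dpr) owned by napi[0], stopping early with
 * -ENOSPC when a destination slot is still occupied.
 */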
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7041) static int tg3_rx_prodring_xfer(struct tg3 *tp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7042) struct tg3_rx_prodring_set *dpr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7043) struct tg3_rx_prodring_set *spr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7044) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7045) u32 si, di, cpycnt, src_prod_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7046) int i, err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7047)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7048) while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7049) src_prod_idx = spr->rx_std_prod_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7050)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7051) /* Make sure updates to the rx_std_buffers[] entries and the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7052) * standard producer index are seen in the correct order.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7053) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7054) smp_rmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7056) if (spr->rx_std_cons_idx == src_prod_idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7057) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7058)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7059) if (spr->rx_std_cons_idx < src_prod_idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7060) cpycnt = src_prod_idx - spr->rx_std_cons_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7061) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7062) cpycnt = tp->rx_std_ring_mask + 1 -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7063) spr->rx_std_cons_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7065) cpycnt = min(cpycnt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7066) tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7067)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7068) si = spr->rx_std_cons_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7069) di = dpr->rx_std_prod_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7070)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7071) for (i = di; i < di + cpycnt; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7072) if (dpr->rx_std_buffers[i].data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7073) cpycnt = i - di;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7074) err = -ENOSPC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7075) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7076) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7077) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7079) if (!cpycnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7080) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7082) /* Ensure that updates to the rx_std_buffers ring and the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7083) * shadowed hardware producer ring from tg3_recycle_skb() are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7084) * ordered correctly WRT the skb check above.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7085) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7086) smp_rmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7088) memcpy(&dpr->rx_std_buffers[di],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7089) &spr->rx_std_buffers[si],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7090) cpycnt * sizeof(struct ring_info));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7091)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7092) for (i = 0; i < cpycnt; i++, di++, si++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7093) struct tg3_rx_buffer_desc *sbd, *dbd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7094) sbd = &spr->rx_std[si];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7095) dbd = &dpr->rx_std[di];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7096) dbd->addr_hi = sbd->addr_hi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7097) dbd->addr_lo = sbd->addr_lo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7098) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7099)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7100) spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7101) tp->rx_std_ring_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7102) dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7103) tp->rx_std_ring_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7104) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7106) while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7107) src_prod_idx = spr->rx_jmb_prod_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7109) /* Make sure updates to the rx_jmb_buffers[] entries and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7110) * the jumbo producer index are seen in the correct order.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7111) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7112) smp_rmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7114) if (spr->rx_jmb_cons_idx == src_prod_idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7115) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7117) if (spr->rx_jmb_cons_idx < src_prod_idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7118) cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7119) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7120) cpycnt = tp->rx_jmb_ring_mask + 1 -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7121) spr->rx_jmb_cons_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7123) cpycnt = min(cpycnt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7124) tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7126) si = spr->rx_jmb_cons_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7127) di = dpr->rx_jmb_prod_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7129) for (i = di; i < di + cpycnt; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7130) if (dpr->rx_jmb_buffers[i].data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7131) cpycnt = i - di;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7132) err = -ENOSPC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7133) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7134) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7135) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7137) if (!cpycnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7138) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7140) /* Ensure that updates to the rx_jmb_buffers ring and the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7141) * shadowed hardware producer ring from tg3_recycle_skb() are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7142) * ordered correctly WRT the skb check above.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7143) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7144) smp_rmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7146) memcpy(&dpr->rx_jmb_buffers[di],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7147) &spr->rx_jmb_buffers[si],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7148) cpycnt * sizeof(struct ring_info));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7150) for (i = 0; i < cpycnt; i++, di++, si++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7151) struct tg3_rx_buffer_desc *sbd, *dbd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7152) sbd = &spr->rx_jmb[si].std;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7153) dbd = &dpr->rx_jmb[di].std;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7154) dbd->addr_hi = sbd->addr_hi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7155) dbd->addr_lo = sbd->addr_lo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7156) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7158) spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7159) tp->rx_jmb_ring_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7160) dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7161) tp->rx_jmb_ring_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7162) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7164) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7165) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7167) static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7168) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7169) struct tg3 *tp = tnapi->tp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7170)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7171) /* run TX completion thread */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7172) if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7173) tg3_tx(tnapi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7174) if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7175) return work_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7176) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7178) if (!tnapi->rx_rcb_prod_idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7179) return work_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7180)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7181) /* run RX thread, within the bounds set by NAPI.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7182) * All RX "locking" is done by ensuring outside
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7183) * code synchronizes with tg3->napi.poll()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7184) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7185) if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7186) work_done += tg3_rx(tnapi, budget - work_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7187)
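	/* With RSS only the first RX vector refills the chip: gather the
	 * buffers recycled by every per-queue ring into napi[0]'s master
	 * ring, kick the producer mailboxes, and on a partial transfer
	 * (-ENOSPC) force a coalescing-now event so the transfer is
	 * retried.
	 */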
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7188) if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7189) struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7190) int i, err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7191) u32 std_prod_idx = dpr->rx_std_prod_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7192) u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7193)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7194) tp->rx_refill = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7195) for (i = 1; i <= tp->rxq_cnt; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7196) err |= tg3_rx_prodring_xfer(tp, dpr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7197) &tp->napi[i].prodring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7198)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7199) wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7201) if (std_prod_idx != dpr->rx_std_prod_idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7202) tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7203) dpr->rx_std_prod_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7204)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7205) if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7206) tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7207) dpr->rx_jmb_prod_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7209) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7210) tw32_f(HOSTCC_MODE, tp->coal_now);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7211) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7213) return work_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7214) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7216) static inline void tg3_reset_task_schedule(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7217) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7218) if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7219) schedule_work(&tp->reset_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7220) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7222) static inline void tg3_reset_task_cancel(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7223) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7224) if (test_and_clear_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7225) cancel_work_sync(&tp->reset_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7226) tg3_flag_clear(tp, TX_RECOVERY_PENDING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7227) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7228)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7229) static int tg3_poll_msix(struct napi_struct *napi, int budget)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7230) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7231) struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7232) struct tg3 *tp = tnapi->tp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7233) int work_done = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7234) struct tg3_hw_status *sblk = tnapi->hw_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7235)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7236) while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7237) work_done = tg3_poll_work(tnapi, work_done, budget);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7239) if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7240) goto tx_recovery;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7241)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7242) if (unlikely(work_done >= budget))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7243) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7244)
		/* tnapi->last_tag is written back to the interrupt mailbox
		 * when interrupts are re-enabled below, telling the hw how
		 * much work has been processed, so we must read it before
		 * checking for more work.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7248) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7249) tnapi->last_tag = sblk->status_tag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7250) tnapi->last_irq_tag = tnapi->last_tag;
		/* Order the status tag read before the ring index
		 * checks below.
		 */
		rmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7252)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7253) /* check for RX/TX work to do */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7254) if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7255) *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7256)
			/* This check is not race free, but looping
			 * again reduces the number of interrupts.
			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7260) if (tnapi == &tp->napi[1] && tp->rx_refill)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7261) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7263) napi_complete_done(napi, work_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7264) /* Reenable interrupts. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7265) tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7266)
			/* This check is synchronized with napi_schedule()
			 * by napi_complete() to close the race condition.
			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7270) if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7271) tw32(HOSTCC_MODE, tp->coalesce_mode |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7272) HOSTCC_MODE_ENABLE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7273) tnapi->coal_now);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7274) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7275) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7276) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7277) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7278)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7279) tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7280) return work_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7282) tx_recovery:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7283) /* work_done is guaranteed to be less than budget. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7284) napi_complete(napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7285) tg3_reset_task_schedule(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7286) return work_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7287) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7288)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7289) static void tg3_process_error(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7290) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7291) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7292) bool real_error = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7294) if (tg3_flag(tp, ERROR_PROCESSED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7295) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7296)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7297) /* Check Flow Attention register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7298) val = tr32(HOSTCC_FLOW_ATTN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7299) if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7300) netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7301) real_error = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7302) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7304) if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7305) netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7306) real_error = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7307) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7309) if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7310) netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7311) real_error = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7312) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7313)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7314) if (!real_error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7315) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7317) tg3_dump_state(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7319) tg3_flag_set(tp, ERROR_PROCESSED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7320) tg3_reset_task_schedule(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7321) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7322)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7323) static int tg3_poll(struct napi_struct *napi, int budget)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7324) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7325) struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7326) struct tg3 *tp = tnapi->tp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7327) int work_done = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7328) struct tg3_hw_status *sblk = tnapi->hw_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7329)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7330) while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7331) if (sblk->status & SD_STATUS_ERROR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7332) tg3_process_error(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7333)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7334) tg3_poll_link(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7335)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7336) work_done = tg3_poll_work(tnapi, work_done, budget);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7337)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7338) if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7339) goto tx_recovery;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7340)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7341) if (unlikely(work_done >= budget))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7342) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7343)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7344) if (tg3_flag(tp, TAGGED_STATUS)) {
			/* tnapi->last_tag is used in tg3_int_reenable() below
			 * to tell the hw how much work has been processed,
			 * so we must read it before checking for more work.
			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7349) tnapi->last_tag = sblk->status_tag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7350) tnapi->last_irq_tag = tnapi->last_tag;
			/* Order the status tag read before the work
			 * check below.
			 */
			rmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7352) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7353) sblk->status &= ~SD_STATUS_UPDATED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7354)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7355) if (likely(!tg3_has_work(tnapi))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7356) napi_complete_done(napi, work_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7357) tg3_int_reenable(tnapi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7358) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7359) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7360) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7362) tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7363) return work_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7365) tx_recovery:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7366) /* work_done is guaranteed to be less than budget. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7367) napi_complete(napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7368) tg3_reset_task_schedule(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7369) return work_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7370) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7372) static void tg3_napi_disable(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7373) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7374) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7375)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7376) for (i = tp->irq_cnt - 1; i >= 0; i--)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7377) napi_disable(&tp->napi[i].napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7378) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7379)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7380) static void tg3_napi_enable(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7381) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7382) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7383)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7384) for (i = 0; i < tp->irq_cnt; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7385) napi_enable(&tp->napi[i].napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7386) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7387)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7388) static void tg3_napi_init(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7389) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7390) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7391)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7392) netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7393) for (i = 1; i < tp->irq_cnt; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7394) netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7395) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7396)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7397) static void tg3_napi_fini(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7398) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7399) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7401) for (i = 0; i < tp->irq_cnt; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7402) netif_napi_del(&tp->napi[i].napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7403) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7404)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7405) static inline void tg3_netif_stop(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7406) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7407) netif_trans_update(tp->dev); /* prevent tx timeout */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7408) tg3_napi_disable(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7409) netif_carrier_off(tp->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7410) netif_tx_disable(tp->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7411) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7412)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7413) /* tp->lock must be held */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7414) static inline void tg3_netif_start(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7415) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7416) tg3_ptp_resume(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7418) /* NOTE: unconditional netif_tx_wake_all_queues is only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7419) * appropriate so long as all callers are assured to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7420) * have free tx slots (such as after tg3_init_hw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7421) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7422) netif_tx_wake_all_queues(tp->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7423)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7424) if (tp->link_up)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7425) netif_carrier_on(tp->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7426)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7427) tg3_napi_enable(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7428) tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7429) tg3_enable_ints(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7430) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7431)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7432) static void tg3_irq_quiesce(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7433) __releases(tp->lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7434) __acquires(tp->lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7435) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7436) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7437)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7438) BUG_ON(tp->irq_sync);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7439)
	tp->irq_sync = 1;
	/* Make irq_sync visible to the ISRs before synchronizing. */
	smp_mb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7442)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7443) spin_unlock_bh(&tp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7444)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7445) for (i = 0; i < tp->irq_cnt; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7446) synchronize_irq(tp->napi[i].irq_vec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7447)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7448) spin_lock_bh(&tp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7449) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7450)
/* Fully shut down all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, the IRQ handlers must be synchronized
 * with as well.  Most of the time this is not necessary, except
 * when shutting down the device.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7456) static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7457) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7458) spin_lock_bh(&tp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7459) if (irq_sync)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7460) tg3_irq_quiesce(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7461) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7462)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7463) static inline void tg3_full_unlock(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7464) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7465) spin_unlock_bh(&tp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7466) }
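
/* Typical caller pattern (a sketch, not a quote of any one call site):
 *
 *	tg3_full_lock(tp, 1);	(irq_sync != 0 also quiesces the ISRs)
 *	... reconfigure the hardware with all other activity stopped ...
 *	tg3_full_unlock(tp);
 *
 * With irq_sync == 0 only tp->lock is taken, which suffices for paths
 * that do not race against the interrupt handlers.
 */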
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7467)
/* One-shot MSI handler - the chip automatically disables the
 * interrupt after sending the MSI, so the driver doesn't have to.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7471) static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7472) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7473) struct tg3_napi *tnapi = dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7474) struct tg3 *tp = tnapi->tp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7475)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7476) prefetch(tnapi->hw_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7477) if (tnapi->rx_rcb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7478) prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7479)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7480) if (likely(!tg3_irq_sync(tp)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7481) napi_schedule(&tnapi->napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7482)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7483) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7484) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7485)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7486) /* MSI ISR - No need to check for interrupt sharing and no need to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7487) * flush status block and interrupt mailbox. PCI ordering rules
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7488) * guarantee that MSI will arrive after the status block.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7489) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7490) static irqreturn_t tg3_msi(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7491) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7492) struct tg3_napi *tnapi = dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7493) struct tg3 *tp = tnapi->tp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7494)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7495) prefetch(tnapi->hw_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7496) if (tnapi->rx_rcb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7497) prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7498) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7499) * Writing any value to intr-mbox-0 clears PCI INTA# and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7500) * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7502) * NIC to stop sending us irqs, engaging "in-intr-handler"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7503) * event coalescing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7504) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7505) tw32_mailbox(tnapi->int_mbox, 0x00000001);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7506) if (likely(!tg3_irq_sync(tp)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7507) napi_schedule(&tnapi->napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7508)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7509) return IRQ_RETVAL(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7510) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7511)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7512) static irqreturn_t tg3_interrupt(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7513) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7514) struct tg3_napi *tnapi = dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7515) struct tg3 *tp = tnapi->tp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7516) struct tg3_hw_status *sblk = tnapi->hw_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7517) unsigned int handled = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7518)
	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt
	 * is visible in host memory.  Reading the PCI State register
	 * will confirm whether the interrupt is ours and will flush the
	 * status block.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7524) if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7525) if (tg3_flag(tp, CHIP_RESETTING) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7526) (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7527) handled = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7528) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7529) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7530) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7531)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7532) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7533) * Writing any value to intr-mbox-0 clears PCI INTA# and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7534) * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7536) * NIC to stop sending us irqs, engaging "in-intr-handler"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7537) * event coalescing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7538) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7539) * Flush the mailbox to de-assert the IRQ immediately to prevent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7540) * spurious interrupts. The flush impacts performance but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7541) * excessive spurious interrupts can be worse in some cases.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7542) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7543) tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7544) if (tg3_irq_sync(tp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7545) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7546) sblk->status &= ~SD_STATUS_UPDATED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7547) if (likely(tg3_has_work(tnapi))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7548) prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7549) napi_schedule(&tnapi->napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7550) } else {
		/* No work; a shared interrupt, perhaps?  Re-enable
		 * interrupts and flush that PCI write.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7554) tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7555) 0x00000000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7556) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7557) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7558) return IRQ_RETVAL(handled);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7559) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7560)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7561) static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7562) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7563) struct tg3_napi *tnapi = dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7564) struct tg3 *tp = tnapi->tp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7565) struct tg3_hw_status *sblk = tnapi->hw_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7566) unsigned int handled = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7567)
	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt
	 * is visible in host memory.  Reading the PCI State register
	 * will confirm whether the interrupt is ours and will flush the
	 * status block.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7573) if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7574) if (tg3_flag(tp, CHIP_RESETTING) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7575) (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7576) handled = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7577) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7578) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7579) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7580)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7581) /*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7585) * NIC to stop sending us irqs, engaging "in-intr-handler"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7586) * event coalescing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7587) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7588) * Flush the mailbox to de-assert the IRQ immediately to prevent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7589) * spurious interrupts. The flush impacts performance but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7590) * excessive spurious interrupts can be worse in some cases.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7591) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7592) tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7593)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7594) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7595) * In a shared interrupt configuration, sometimes other devices'
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7596) * interrupts will scream. We record the current status tag here
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7597) * so that the above check can report that the screaming interrupts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7598) * are unhandled. Eventually they will be silenced.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7599) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7600) tnapi->last_irq_tag = sblk->status_tag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7601)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7602) if (tg3_irq_sync(tp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7603) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7604)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7605) prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7606)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7607) napi_schedule(&tnapi->napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7608)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7609) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7610) return IRQ_RETVAL(handled);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7611) }
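
/* Note on the tagged scheme: the chip bumps status_tag whenever it DMAs
 * a fresh status block, so status_tag == last_irq_tag means no new
 * events.  Returning IRQ_NONE in that case (when the PCI state register
 * also shows our INTA# de-asserted) lets the IRQ core's spurious
 * interrupt accounting eventually silence a screaming shared line,
 * while the mailbox write above keeps our own source de-asserted.
 */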
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7612)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7613) /* ISR for interrupt test */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7614) static irqreturn_t tg3_test_isr(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7615) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7616) struct tg3_napi *tnapi = dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7617) struct tg3 *tp = tnapi->tp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7618) struct tg3_hw_status *sblk = tnapi->hw_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7619)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7620) if ((sblk->status & SD_STATUS_UPDATED) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7621) !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7622) tg3_disable_ints(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7623) return IRQ_RETVAL(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7624) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7625) return IRQ_RETVAL(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7626) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7627)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7628) #ifdef CONFIG_NET_POLL_CONTROLLER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7629) static void tg3_poll_controller(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7630) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7631) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7632) struct tg3 *tp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7633)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7634) if (tg3_irq_sync(tp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7635) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7636)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7637) for (i = 0; i < tp->irq_cnt; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7638) tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7639) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7640) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7641)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7642) static void tg3_tx_timeout(struct net_device *dev, unsigned int txqueue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7643) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7644) struct tg3 *tp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7645)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7646) if (netif_msg_tx_err(tp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7647) netdev_err(dev, "transmit timed out, resetting\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7648) tg3_dump_state(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7649) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7650)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7651) tg3_reset_task_schedule(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7652) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7653)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7654) /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7655) static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7656) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7657) u32 base = (u32) mapping & 0xffffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7658)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7659) return base + len + 8 < base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7660) }
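
/* Worked example: mapping = 0xffff_f000, len = 0x2000.  In 32-bit
 * arithmetic base + len + 8 wraps to 0x0000_1008, which is below base,
 * so the buffer straddles a 4GB boundary and must take the workaround
 * path.  The +8 widens the test slightly.
 */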
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7661)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7662) /* Test for TSO DMA buffers that cross into regions which are within MSS bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7663) * of any 4GB boundaries: 4G, 8G, etc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7664) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7665) static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7666) u32 len, u32 mss)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7667) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7668) if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7669) u32 base = (u32) mapping & 0xffffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7670)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7671) return ((base + len + (mss & 0x3fff)) < base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7672) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7673) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7674) }
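
/* Example for the 5762 case: base = 0xffff_c000, len = 0x3000,
 * mss = 0x1400.  base + len = 0xffff_f000 does not wrap, but adding
 * the MSS carries past 4GB (the 32-bit sum wraps to 0x0000_0400), so
 * a segment could land across the boundary and the buffer is flagged.
 */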
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7675)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7676) /* Test for DMA addresses > 40-bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7677) static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7678) int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7679) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7680) #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7681) if (tg3_flag(tp, 40BIT_DMA_BUG))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7682) return ((u64) mapping + len) > DMA_BIT_MASK(40);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7683) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7684) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7685) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7686) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7687) }
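
/* Example: with the 40BIT_DMA_BUG flag set, mapping = 0xff_ffff_f000
 * and len = 0x1001 give mapping + len = 0x100_0000_0001, just above
 * DMA_BIT_MASK(40) = 0xff_ffff_ffff, so the buffer is flagged.
 */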
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7688)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7689) static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7690) dma_addr_t mapping, u32 len, u32 flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7691) u32 mss, u32 vlan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7692) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7693) txbd->addr_hi = ((u64) mapping >> 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7694) txbd->addr_lo = ((u64) mapping & 0xffffffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7695) txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7696) txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7697) }
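
/* Packing example (shift positions per the macros above): a 1514-byte
 * frame at mapping = 0x1_2345_6000 with flags = TXD_FLAG_END, mss = 0
 * and vlan = 5 yields addr_hi = 0x1, addr_lo = 0x2345_6000,
 * len_flags = (1514 << TXD_LEN_SHIFT) | TXD_FLAG_END, and a vlan_tag
 * word carrying the MSS in one half and the VLAN id in the other.
 */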
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7698)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7699) static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7700) dma_addr_t map, u32 len, u32 flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7701) u32 mss, u32 vlan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7702) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7703) struct tg3 *tp = tnapi->tp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7704) bool hwbug = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7705)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7706) if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7707) hwbug = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7708)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7709) if (tg3_4g_overflow_test(map, len))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7710) hwbug = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7711)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7712) if (tg3_4g_tso_overflow_test(tp, map, len, mss))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7713) hwbug = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7714)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7715) if (tg3_40bit_overflow_test(tp, map, len))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7716) hwbug = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7717)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7718) if (tp->dma_limit) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7719) u32 prvidx = *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7720) u32 tmp_flag = flags & ~TXD_FLAG_END;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7721) while (len > tp->dma_limit && *budget) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7722) u32 frag_len = tp->dma_limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7723) len -= tp->dma_limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7724)
			/* Avoid the 8-byte DMA problem */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7726) if (len <= 8) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7727) len += tp->dma_limit / 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7728) frag_len = tp->dma_limit / 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7729) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7730)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7731) tnapi->tx_buffers[*entry].fragmented = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7732)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7733) tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7734) frag_len, tmp_flag, mss, vlan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7735) *budget -= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7736) prvidx = *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7737) *entry = NEXT_TX(*entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7738)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7739) map += frag_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7740) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7741)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7742) if (len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7743) if (*budget) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7744) tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7745) len, flags, mss, vlan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7746) *budget -= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7747) *entry = NEXT_TX(*entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7748) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7749) hwbug = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7750) tnapi->tx_buffers[prvidx].fragmented = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7751) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7752) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7753) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7754) tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7755) len, flags, mss, vlan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7756) *entry = NEXT_TX(*entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7757) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7758)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7759) return hwbug;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7760) }
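
/* Worked example of the dma_limit split above (illustrative values):
 * dma_limit = 4096, len = 8200.  Pass one emits a 4096-byte BD
 * (len -> 4104); pass two would leave an 8-byte tail, so the chunk is
 * halved to 2048 (len -> 2056); the loop then exits and the final BD
 * carries 2056 bytes.  4096 + 2048 + 2056 = 8200, and no emitted BD
 * falls in the 1..8 byte range that trips the short-DMA bug.
 */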
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7761)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7762) static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7763) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7764) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7765) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7766) struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7767)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7768) skb = txb->skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7769) txb->skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7770)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7771) pci_unmap_single(tnapi->tp->pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7772) dma_unmap_addr(txb, mapping),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7773) skb_headlen(skb),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7774) PCI_DMA_TODEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7775)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7776) while (txb->fragmented) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7777) txb->fragmented = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7778) entry = NEXT_TX(entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7779) txb = &tnapi->tx_buffers[entry];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7780) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7781)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7782) for (i = 0; i <= last; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7783) const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7784)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7785) entry = NEXT_TX(entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7786) txb = &tnapi->tx_buffers[entry];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7788) pci_unmap_page(tnapi->tp->pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7789) dma_unmap_addr(txb, mapping),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7790) skb_frag_size(frag), PCI_DMA_TODEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7791)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7792) while (txb->fragmented) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7793) txb->fragmented = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7794) entry = NEXT_TX(entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7795) txb = &tnapi->tx_buffers[entry];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7796) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7797) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7798) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7800) /* Workaround 4GB and 40-bit hardware DMA bugs. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7801) static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7802) struct sk_buff **pskb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7803) u32 *entry, u32 *budget,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7804) u32 base_flags, u32 mss, u32 vlan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7805) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7806) struct tg3 *tp = tnapi->tp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7807) struct sk_buff *new_skb, *skb = *pskb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7808) dma_addr_t new_addr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7809) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7810)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7811) if (tg3_asic_rev(tp) != ASIC_REV_5701)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7812) new_skb = skb_copy(skb, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7813) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7814) int more_headroom = 4 - ((unsigned long)skb->data & 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7815)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7816) new_skb = skb_copy_expand(skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7817) skb_headroom(skb) + more_headroom,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7818) skb_tailroom(skb), GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7819) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7821) if (!new_skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7822) ret = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7823) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7824) /* New SKB is guaranteed to be linear. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7825) new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7826) PCI_DMA_TODEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7827) /* Make sure the mapping succeeded */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7828) if (pci_dma_mapping_error(tp->pdev, new_addr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7829) dev_kfree_skb_any(new_skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7830) ret = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7831) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7832) u32 save_entry = *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7834) base_flags |= TXD_FLAG_END;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7835)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7836) tnapi->tx_buffers[*entry].skb = new_skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7837) dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7838) mapping, new_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7840) if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7841) new_skb->len, base_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7842) mss, vlan)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7843) tg3_tx_skb_unmap(tnapi, save_entry, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7844) dev_kfree_skb_any(new_skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7845) ret = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7846) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7847) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7848) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7850) dev_consume_skb_any(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7851) *pskb = new_skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7852) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7853) }
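
/* This is the slow path: the would_hit_hwbug logic in tg3_start_xmit()
 * below falls back to it, trading a full skb copy and remap for a
 * linear buffer that avoids the 4GB-crossing, 40-bit and short-DMA
 * errata checked in tg3_tx_frag_set().
 */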
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7855) static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7856) {
	/* Return false when we can never have enough descriptors,
	 * as gso_segs can be more than the current ring size.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7860) return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7861) }
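
/* Example: with tx_pending = 512, gso_segs must stay below
 * 512 / 3 = 170, matching the worst-case estimate of three
 * descriptors per segment (frag_cnt_est) used by tg3_tso_bug() below.
 */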
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7863) static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7864)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7865) /* Use GSO to workaround all TSO packets that meet HW bug conditions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7866) * indicated in tg3_tx_frag_set()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7867) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7868) static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7869) struct netdev_queue *txq, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7870) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7871) u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7872) struct sk_buff *segs, *seg, *next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7873)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7874) /* Estimate the number of fragments in the worst case */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7875) if (unlikely(tg3_tx_avail(tnapi) <= frag_cnt_est)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7876) netif_tx_stop_queue(txq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7877)
		/* netif_tx_stop_queue() must be done before checking
		 * tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7883) smp_mb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7884) if (tg3_tx_avail(tnapi) <= frag_cnt_est)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7885) return NETDEV_TX_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7887) netif_tx_wake_queue(txq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7888) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7890) segs = skb_gso_segment(skb, tp->dev->features &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7891) ~(NETIF_F_TSO | NETIF_F_TSO6));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7892) if (IS_ERR(segs) || !segs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7893) goto tg3_tso_bug_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7895) skb_list_walk_safe(segs, seg, next) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7896) skb_mark_not_on_list(seg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7897) tg3_start_xmit(seg, tp->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7898) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7899)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7900) tg3_tso_bug_end:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7901) dev_consume_skb_any(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7903) return NETDEV_TX_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7904) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7906) /* hard_start_xmit for all devices */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7907) static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7908) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7909) struct tg3 *tp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7910) u32 len, entry, base_flags, mss, vlan = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7911) u32 budget;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7912) int i = -1, would_hit_hwbug;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7913) dma_addr_t mapping;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7914) struct tg3_napi *tnapi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7915) struct netdev_queue *txq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7916) unsigned int last;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7917) struct iphdr *iph = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7918) struct tcphdr *tcph = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7919) __sum16 tcp_csum = 0, ip_csum = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7920) __be16 ip_tot_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7922) txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7923) tnapi = &tp->napi[skb_get_queue_mapping(skb)];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7924) if (tg3_flag(tp, ENABLE_TSS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7925) tnapi++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7926)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7927) budget = tg3_tx_avail(tnapi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7929) /* We are running in BH disabled context with netif_tx_lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7930) * and TX reclaim runs via tp->napi.poll inside of a software
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7931) * interrupt. Furthermore, IRQ processing runs lockless so we have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7932) * no IRQ context deadlocks to worry about either. Rejoice!
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7933) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7934) if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7935) if (!netif_tx_queue_stopped(txq)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7936) netif_tx_stop_queue(txq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7938) /* This is a hard error, log it. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7939) netdev_err(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7940) "BUG! Tx Ring full when queue awake!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7941) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7942) return NETDEV_TX_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7943) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7945) entry = tnapi->tx_prod;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7946) base_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7948) mss = skb_shinfo(skb)->gso_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7949) if (mss) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7950) u32 tcp_opt_len, hdr_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7951)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7952) if (skb_cow_head(skb, 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7953) goto drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7955) iph = ip_hdr(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7956) tcp_opt_len = tcp_optlen(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7957)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7958) hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7959)
		/* HW/FW cannot correctly segment packets that have been
		 * vlan encapsulated.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7963) if (skb->protocol == htons(ETH_P_8021Q) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7964) skb->protocol == htons(ETH_P_8021AD)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7965) if (tg3_tso_bug_gso_check(tnapi, skb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7966) return tg3_tso_bug(tp, tnapi, txq, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7967) goto drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7968) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7969)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7970) if (!skb_is_gso_v6(skb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7971) if (unlikely((ETH_HLEN + hdr_len) > 80) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7972) tg3_flag(tp, TSO_BUG)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7973) if (tg3_tso_bug_gso_check(tnapi, skb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7974) return tg3_tso_bug(tp, tnapi, txq, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7975) goto drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7976) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7977) ip_csum = iph->check;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7978) ip_tot_len = iph->tot_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7979) iph->check = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7980) iph->tot_len = htons(mss + hdr_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7981) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7983) base_flags |= (TXD_FLAG_CPU_PRE_DMA |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7984) TXD_FLAG_CPU_POST_DMA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7985)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7986) tcph = tcp_hdr(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7987) tcp_csum = tcph->check;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7988)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7989) if (tg3_flag(tp, HW_TSO_1) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7990) tg3_flag(tp, HW_TSO_2) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7991) tg3_flag(tp, HW_TSO_3)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7992) tcph->check = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7993) base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7994) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7995) tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7996) 0, IPPROTO_TCP, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7997) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7998)
		if (tg3_flag(tp, HW_TSO_3)) {
			/* Scatter the header length across the BD:
			 * hdr_len bits 2-3 go to mss bits 14-15, bit 4
			 * to base_flags bit 4, and bits 5-9 to
			 * base_flags bits 10-14.
			 */
			mss |= (hdr_len & 0xc) << 12;
			if (hdr_len & 0x10)
				base_flags |= 0x00000010;
			base_flags |= (hdr_len & 0x3e0) << 5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8004) } else if (tg3_flag(tp, HW_TSO_2))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8005) mss |= hdr_len << 9;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8006) else if (tg3_flag(tp, HW_TSO_1) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8007) tg3_asic_rev(tp) == ASIC_REV_5705) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8008) if (tcp_opt_len || iph->ihl > 5) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8009) int tsflags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8010)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8011) tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8012) mss |= (tsflags << 11);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8013) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8014) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8015) if (tcp_opt_len || iph->ihl > 5) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8016) int tsflags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8018) tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8019) base_flags |= tsflags << 12;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8020) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8021) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8022) } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* HW/FW cannot correctly checksum packets that have been
		 * vlan encapsulated.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8026) if (skb->protocol == htons(ETH_P_8021Q) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8027) skb->protocol == htons(ETH_P_8021AD)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8028) if (skb_checksum_help(skb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8029) goto drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8030) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8031) base_flags |= TXD_FLAG_TCPUDP_CSUM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8032) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8033) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8034)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8035) if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8036) !mss && skb->len > VLAN_ETH_FRAME_LEN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8037) base_flags |= TXD_FLAG_JMB_PKT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8038)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8039) if (skb_vlan_tag_present(skb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8040) base_flags |= TXD_FLAG_VLAN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8041) vlan = skb_vlan_tag_get(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8042) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8044) if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8045) tg3_flag(tp, TX_TSTAMP_EN)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8046) skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8047) base_flags |= TXD_FLAG_HWTSTAMP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8048) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8049)
	len = skb_headlen(skb);

	mapping = dma_map_single(&tp->pdev->dev, skb->data, len,
				 DMA_TO_DEVICE);
	if (dma_mapping_error(&tp->pdev->dev, mapping))
		goto drop;

	tnapi->tx_buffers[entry].skb = skb;
	dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);

	would_hit_hwbug = 0;

	if (tg3_flag(tp, 5701_DMA_BUG))
		would_hit_hwbug = 1;

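	/* tg3_tx_frag_set() reports failure when a BD would trip one of
	 * the chip's DMA errata (e.g. a buffer crossing a 4GB boundary);
	 * in that case fall through to the workaround path below.
	 */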
	if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
			    ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
			    mss, vlan)) {
		would_hit_hwbug = 1;
	} else if (skb_shinfo(skb)->nr_frags > 0) {
		u32 tmp_mss = mss;

		if (!tg3_flag(tp, HW_TSO_1) &&
		    !tg3_flag(tp, HW_TSO_2) &&
		    !tg3_flag(tp, HW_TSO_3))
			tmp_mss = 0;

		/* Now loop through additional data
		 * fragments, and queue them.
		 */
		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = skb_frag_size(frag);
			mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
						   len, DMA_TO_DEVICE);

			tnapi->tx_buffers[entry].skb = NULL;
			dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
					   mapping);
			if (dma_mapping_error(&tp->pdev->dev, mapping))
				goto dma_error;

			if (!budget ||
			    tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
					    len, base_flags |
					    ((i == last) ? TXD_FLAG_END : 0),
					    tmp_mss, vlan)) {
				would_hit_hwbug = 1;
				break;
			}
		}
	}

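	/* Workaround path: as queued, the BD chain would trigger a DMA
	 * bug on this chip.  Unwind the mappings, then either resegment
	 * the packet in software (GSO) or copy it into a single linear
	 * buffer the hardware can handle.
	 */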
	if (would_hit_hwbug) {
		tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);

		if (mss && tg3_tso_bug_gso_check(tnapi, skb)) {
			/* If it's a TSO packet, do GSO instead of
			 * allocating and copying to a large linear SKB
			 */
			if (ip_tot_len) {
				iph->check = ip_csum;
				iph->tot_len = ip_tot_len;
			}
			tcph->check = tcp_csum;
			return tg3_tso_bug(tp, tnapi, txq, skb);
		}

		/* If the workaround fails due to memory/mapping
		 * failure, silently drop this packet.
		 */
		entry = tnapi->tx_prod;
		budget = tg3_tx_avail(tnapi);
		if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
						base_flags, mss, vlan))
			goto drop_nofree;
	}

	skb_tx_timestamp(skb);
	netdev_tx_sent_queue(txq, skb->len);

	/* Sync BD data before updating mailbox */
	wmb();

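	/* Publish the new producer index.  Stop the queue once fewer than
	 * MAX_SKB_FRAGS + 1 descriptors remain, since a worst-case skb
	 * needs that many BDs.
	 */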
	tnapi->tx_prod = entry;
	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
			netif_tx_wake_queue(txq);
	}

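	/* Only ring the doorbell when the stack has no further packets
	 * queued for us (xmit_more) or the queue just stopped; this
	 * batches the relatively expensive MMIO mailbox writes.
	 */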
	if (!netdev_xmit_more() || netif_xmit_stopped(txq)) {
		/* Packets are ready, update Tx producer idx on card. */
		tw32_tx_mbox(tnapi->prodmbox, entry);
	}

	return NETDEV_TX_OK;

dma_error:
	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
	tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
drop:
	dev_kfree_skb_any(skb);
drop_nofree:
	tp->tx_dropped++;
	return NETDEV_TX_OK;
}

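/* Loop the MAC's transmit path back to its receive path.  Used by
 * tg3_set_loopback() below when the NETIF_F_LOOPBACK feature is
 * toggled.
 */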
static void tg3_mac_loopback(struct tg3 *tp, bool enable)
{
	if (enable) {
		tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
				  MAC_MODE_PORT_MODE_MASK);

		tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;

		if (!tg3_flag(tp, 5705_PLUS))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;

		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else {
		tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;

		if (tg3_flag(tp, 5705_PLUS) ||
		    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
		    tg3_asic_rev(tp) == ASIC_REV_5700)
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	tw32(MAC_MODE, tp->mac_mode);
	udelay(40);
}

static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
{
	u32 val, bmcr, mac_mode, ptest = 0;

	tg3_phy_toggle_apd(tp, false);
	tg3_phy_toggle_automdix(tp, false);

	if (extlpbk && tg3_phy_set_extloopbk(tp))
		return -EIO;

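	/* Build the BMCR value for the requested loopback speed; FET
	 * PHYs top out at 100Mb/s, so 1000Mb/s requests are downgraded
	 * for them.
	 */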
	bmcr = BMCR_FULLDPLX;
	switch (speed) {
	case SPEED_10:
		break;
	case SPEED_100:
		bmcr |= BMCR_SPEED100;
		break;
	case SPEED_1000:
	default:
		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
			speed = SPEED_100;
			bmcr |= BMCR_SPEED100;
		} else {
			speed = SPEED_1000;
			bmcr |= BMCR_SPEED1000;
		}
	}

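	/* For external loopback force a link-up state on the wire:
	 * giga PHYs are made 1000BASE-T master, FET PHYs use the PTEST
	 * trim registers.  Internal loopback only needs BMCR_LOOPBACK.
	 */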
	if (extlpbk) {
		if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
			tg3_readphy(tp, MII_CTRL1000, &val);
			val |= CTL1000_AS_MASTER |
			       CTL1000_ENABLE_MASTER;
			tg3_writephy(tp, MII_CTRL1000, val);
		} else {
			ptest = MII_TG3_FET_PTEST_TRIM_SEL |
				MII_TG3_FET_PTEST_TRIM_2;
			tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
		}
	} else
		bmcr |= BMCR_LOOPBACK;

	tg3_writephy(tp, MII_BMCR, bmcr);

	/* The write needs to be flushed for the FETs */
	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		tg3_readphy(tp, MII_BMCR, &bmcr);

	udelay(40);

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    tg3_asic_rev(tp) == ASIC_REV_5785) {
		tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
			     MII_TG3_FET_PTEST_FRC_TX_LINK |
			     MII_TG3_FET_PTEST_FRC_TX_LOCK);

		/* The write needs to be flushed for the AC131 */
		tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
	}

	/* Reset to prevent losing 1st rx packet intermittently */
	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
	    tg3_flag(tp, 5780_CLASS)) {
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
		tw32_f(MAC_RX_MODE, tp->rx_mode);
	}

	mac_mode = tp->mac_mode &
		   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	if (speed == SPEED_1000)
		mac_mode |= MAC_MODE_PORT_MODE_GMII;
	else
		mac_mode |= MAC_MODE_PORT_MODE_MII;

	if (tg3_asic_rev(tp) == ASIC_REV_5700) {
		u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;

		if (masked_phy_id == TG3_PHY_ID_BCM5401)
			mac_mode &= ~MAC_MODE_LINK_POLARITY;
		else if (masked_phy_id == TG3_PHY_ID_BCM5411)
			mac_mode |= MAC_MODE_LINK_POLARITY;

		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
	}

	tw32(MAC_MODE, mac_mode);
	udelay(40);

	return 0;
}

static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
{
	struct tg3 *tp = netdev_priv(dev);

	if (features & NETIF_F_LOOPBACK) {
		if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
			return;

		spin_lock_bh(&tp->lock);
		tg3_mac_loopback(tp, true);
		netif_carrier_on(tp->dev);
		spin_unlock_bh(&tp->lock);
		netdev_info(dev, "Internal MAC loopback mode enabled.\n");
	} else {
		if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
			return;

		spin_lock_bh(&tp->lock);
		tg3_mac_loopback(tp, false);
		/* Force link status check */
		tg3_setup_phy(tp, true);
		spin_unlock_bh(&tp->lock);
		netdev_info(dev, "Internal MAC loopback mode disabled.\n");
	}
}

static netdev_features_t tg3_fix_features(struct net_device *dev,
					  netdev_features_t features)
{
	struct tg3 *tp = netdev_priv(dev);

	if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
		features &= ~NETIF_F_ALL_TSO;

	return features;
}

static int tg3_set_features(struct net_device *dev, netdev_features_t features)
{
	netdev_features_t changed = dev->features ^ features;

	if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
		tg3_set_loopback(dev, features);

	return 0;
}

static void tg3_rx_prodring_free(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	int i;

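	/* Per-vector (non-default) rings only have buffers posted
	 * between the consumer and producer indexes, so only that
	 * range needs to be freed.
	 */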
	if (tpr != &tp->napi[0].prodring) {
		for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
		     i = (i + 1) & tp->rx_std_ring_mask)
			tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
					 tp->rx_pkt_map_sz);

		if (tg3_flag(tp, JUMBO_CAPABLE)) {
			for (i = tpr->rx_jmb_cons_idx;
			     i != tpr->rx_jmb_prod_idx;
			     i = (i + 1) & tp->rx_jmb_ring_mask) {
				tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
						 TG3_RX_JMB_MAP_SZ);
			}
		}

		return;
	}

	for (i = 0; i <= tp->rx_std_ring_mask; i++)
		tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
				 tp->rx_pkt_map_sz);

	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
		for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
			tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
					 TG3_RX_JMB_MAP_SZ);
	}
}

/* Initialize rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the network stack, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 */
static int tg3_rx_prodring_alloc(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	u32 i, rx_pkt_dma_sz;

	tpr->rx_std_cons_idx = 0;
	tpr->rx_std_prod_idx = 0;
	tpr->rx_jmb_cons_idx = 0;
	tpr->rx_jmb_prod_idx = 0;

	if (tpr != &tp->napi[0].prodring) {
		memset(&tpr->rx_std_buffers[0], 0,
		       TG3_RX_STD_BUFF_RING_SIZE(tp));
		if (tpr->rx_jmb_buffers)
			memset(&tpr->rx_jmb_buffers[0], 0,
			       TG3_RX_JMB_BUFF_RING_SIZE(tp));
		goto done;
	}

	/* Zero out all descriptors. */
	memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));

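	/* 5780-class chips have no separate jumbo ring and service
	 * jumbo frames from the standard ring, so its buffers must be
	 * sized for the larger MTU.
	 */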
	rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
	if (tg3_flag(tp, 5780_CLASS) &&
	    tp->dev->mtu > ETH_DATA_LEN)
		rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
	tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);

	/* Initialize invariants of the rings; we only set this
	 * stuff once.  This works because the card does not
	 * write into the rx buffer posting rings.
	 */
	for (i = 0; i <= tp->rx_std_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_std[i];
		rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
		rxd->opaque = (RXD_OPAQUE_RING_STD |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	/* Now allocate fresh SKBs for each rx ring. */
	for (i = 0; i < tp->rx_pending; i++) {
		unsigned int frag_size;

		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
				      &frag_size) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX standard ring. Only "
				    "%d out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_pending);
			if (i == 0)
				goto initfail;
			tp->rx_pending = i;
			break;
		}
	}

	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		goto done;

	memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));

	if (!tg3_flag(tp, JUMBO_RING_ENABLE))
		goto done;

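	/* Set up the jumbo ring the same way, tagging each BD with
	 * RXD_FLAG_JUMBO so its completions can be told apart from the
	 * standard ring's.
	 */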
	for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_jmb[i].std;
		rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
				  RXD_FLAG_JUMBO;
		rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	for (i = 0; i < tp->rx_jumbo_pending; i++) {
		unsigned int frag_size;

		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
				      &frag_size) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX jumbo ring. Only %d "
				    "out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_jumbo_pending);
			if (i == 0)
				goto initfail;
			tp->rx_jumbo_pending = i;
			break;
		}
	}

done:
	return 0;

initfail:
	tg3_rx_prodring_free(tp, tpr);
	return -ENOMEM;
}

static void tg3_rx_prodring_fini(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	kfree(tpr->rx_std_buffers);
	tpr->rx_std_buffers = NULL;
	kfree(tpr->rx_jmb_buffers);
	tpr->rx_jmb_buffers = NULL;
	if (tpr->rx_std) {
		dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
				  tpr->rx_std, tpr->rx_std_mapping);
		tpr->rx_std = NULL;
	}
	if (tpr->rx_jmb) {
		dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
				  tpr->rx_jmb, tpr->rx_jmb_mapping);
		tpr->rx_jmb = NULL;
	}
}

static int tg3_rx_prodring_init(struct tg3 *tp,
				struct tg3_rx_prodring_set *tpr)
{
	tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
				      GFP_KERNEL);
	if (!tpr->rx_std_buffers)
		return -ENOMEM;

	tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
					 TG3_RX_STD_RING_BYTES(tp),
					 &tpr->rx_std_mapping,
					 GFP_KERNEL);
	if (!tpr->rx_std)
		goto err_out;

	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
		tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
					      GFP_KERNEL);
		if (!tpr->rx_jmb_buffers)
			goto err_out;

		tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
						 TG3_RX_JMB_RING_BYTES(tp),
						 &tpr->rx_jmb_mapping,
						 GFP_KERNEL);
		if (!tpr->rx_jmb)
			goto err_out;
	}

	return 0;

err_out:
	tg3_rx_prodring_fini(tp, tpr);
	return -ENOMEM;
}

/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the network stack, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void tg3_free_rings(struct tg3 *tp)
{
	int i, j;

	for (j = 0; j < tp->irq_cnt; j++) {
		struct tg3_napi *tnapi = &tp->napi[j];

		tg3_rx_prodring_free(tp, &tnapi->prodring);

		if (!tnapi->tx_buffers)
			continue;

		for (i = 0; i < TG3_TX_RING_SIZE; i++) {
			struct sk_buff *skb = tnapi->tx_buffers[i].skb;

			if (!skb)
				continue;

			tg3_tx_skb_unmap(tnapi, i,
					 skb_shinfo(skb)->nr_frags - 1);

			dev_consume_skb_any(skb);
		}
		netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
	}
}

/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the network stack, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 */
static int tg3_init_rings(struct tg3 *tp)
{
	int i;

	/* Free up all the SKBs. */
	tg3_free_rings(tp);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->last_tag = 0;
		tnapi->last_irq_tag = 0;
		tnapi->hw_status->status = 0;
		tnapi->hw_status->status_tag = 0;
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

		tnapi->tx_prod = 0;
		tnapi->tx_cons = 0;
		if (tnapi->tx_ring)
			memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);

		tnapi->rx_rcb_ptr = 0;
		if (tnapi->rx_rcb)
			memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));

		if (tnapi->prodring.rx_std &&
		    tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
			tg3_free_rings(tp);
			return -ENOMEM;
		}
	}

	return 0;
}

static void tg3_mem_tx_release(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tnapi->tx_ring) {
			dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
					  tnapi->tx_ring, tnapi->tx_desc_mapping);
			tnapi->tx_ring = NULL;
		}

		kfree(tnapi->tx_buffers);
		tnapi->tx_buffers = NULL;
	}
}

static int tg3_mem_tx_acquire(struct tg3 *tp)
{
	int i;
	struct tg3_napi *tnapi = &tp->napi[0];

	/* If multivector TSS is enabled, vector 0 does not handle
	 * tx interrupts.  Don't allocate any resources for it.
	 */
	if (tg3_flag(tp, ENABLE_TSS))
		tnapi++;

	for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
		tnapi->tx_buffers = kcalloc(TG3_TX_RING_SIZE,
					    sizeof(struct tg3_tx_ring_info),
					    GFP_KERNEL);
		if (!tnapi->tx_buffers)
			goto err_out;

		tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
						    TG3_TX_RING_BYTES,
						    &tnapi->tx_desc_mapping,
						    GFP_KERNEL);
		if (!tnapi->tx_ring)
			goto err_out;
	}

	return 0;

err_out:
	tg3_mem_tx_release(tp);
	return -ENOMEM;
}

static void tg3_mem_rx_release(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tg3_rx_prodring_fini(tp, &tnapi->prodring);

		if (!tnapi->rx_rcb)
			continue;

		dma_free_coherent(&tp->pdev->dev,
				  TG3_RX_RCB_RING_BYTES(tp),
				  tnapi->rx_rcb,
				  tnapi->rx_rcb_mapping);
		tnapi->rx_rcb = NULL;
	}
}

static int tg3_mem_rx_acquire(struct tg3 *tp)
{
	unsigned int i, limit;

	limit = tp->rxq_cnt;

	/* If RSS is enabled, we need a (dummy) producer ring
	 * set on vector zero.  This is the true hw prodring.
	 */
	if (tg3_flag(tp, ENABLE_RSS))
		limit++;

	for (i = 0; i < limit; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tg3_rx_prodring_init(tp, &tnapi->prodring))
			goto err_out;

		/* If multivector RSS is enabled, vector 0
		 * does not handle rx or tx interrupts.
		 * Don't allocate any resources for it.
		 */
		if (!i && tg3_flag(tp, ENABLE_RSS))
			continue;

		tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
						   TG3_RX_RCB_RING_BYTES(tp),
						   &tnapi->rx_rcb_mapping,
						   GFP_KERNEL);
		if (!tnapi->rx_rcb)
			goto err_out;
	}

	return 0;

err_out:
	tg3_mem_rx_release(tp);
	return -ENOMEM;
}

/*
 * Must be invoked with interrupt sources disabled and
 * the hardware shut down.
 */
static void tg3_free_consistent(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tnapi->hw_status) {
			dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
					  tnapi->hw_status,
					  tnapi->status_mapping);
			tnapi->hw_status = NULL;
		}
	}

	tg3_mem_rx_release(tp);
	tg3_mem_tx_release(tp);

	/* tp->hw_stats can be referenced safely:
	 * 1. under rtnl_lock, or
	 * 2. under tp->lock if TG3_FLAG_INIT_COMPLETE is set.
	 */
	if (tp->hw_stats) {
		dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
				  tp->hw_stats, tp->stats_mapping);
		tp->hw_stats = NULL;
	}
}

/*
 * Must be invoked with interrupt sources disabled and
 * the hardware shut down.  Can sleep.
 */
static int tg3_alloc_consistent(struct tg3 *tp)
{
	int i;

	tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
					  sizeof(struct tg3_hw_stats),
					  &tp->stats_mapping, GFP_KERNEL);
	if (!tp->hw_stats)
		goto err_out;

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		struct tg3_hw_status *sblk;

		tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
						      TG3_HW_STATUS_SIZE,
						      &tnapi->status_mapping,
						      GFP_KERNEL);
		if (!tnapi->hw_status)
			goto err_out;

		sblk = tnapi->hw_status;

		if (tg3_flag(tp, ENABLE_RSS)) {
			u16 *prodptr = NULL;

			/*
			 * When RSS is enabled, the status block format changes
			 * slightly.  The "rx_jumbo_consumer", "reserved",
			 * and "rx_mini_consumer" members get mapped to the
			 * other three rx return ring producer indexes.
			 */
			switch (i) {
			case 1:
				prodptr = &sblk->idx[0].rx_producer;
				break;
			case 2:
				prodptr = &sblk->rx_jumbo_consumer;
				break;
			case 3:
				prodptr = &sblk->reserved;
				break;
			case 4:
				prodptr = &sblk->rx_mini_consumer;
				break;
			}
			tnapi->rx_rcb_prod_idx = prodptr;
		} else {
			tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
		}
	}

	if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
		goto err_out;

	return 0;

err_out:
	tg3_free_consistent(tp);
	return -ENOMEM;
}

#define MAX_WAIT_CNT 1000

/* To stop a block, clear the enable bit and poll till it
 * clears.  tp->lock is held.
 */
static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit,
			  bool silent)
{
	unsigned int i;
	u32 val;

	if (tg3_flag(tp, 5705_PLUS)) {
		switch (ofs) {
		case RCVLSC_MODE:
		case DMAC_MODE:
		case MBFREE_MODE:
		case BUFMGR_MODE:
		case MEMARB_MODE:
			/* We can't enable/disable these bits of the
			 * 5705/5750; just say success.
			 */
			return 0;

		default:
			break;
		}
	}

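	/* Clear the enable bit and give the block up to
	 * MAX_WAIT_CNT * 100 usec (~100 ms) to acknowledge the stop.
	 */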
	val = tr32(ofs);
	val &= ~enable_bit;
	tw32_f(ofs, val);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		if (pci_channel_offline(tp->pdev)) {
			dev_err(&tp->pdev->dev,
				"tg3_stop_block device offline, "
				"ofs=%lx enable_bit=%x\n",
				ofs, enable_bit);
			return -ENODEV;
		}

		udelay(100);
		val = tr32(ofs);
		if ((val & enable_bit) == 0)
			break;
	}

	if (i == MAX_WAIT_CNT && !silent) {
		dev_err(&tp->pdev->dev,
			"tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
			ofs, enable_bit);
		return -ENODEV;
	}

	return 0;
}

/* tp->lock is held. */
static int tg3_abort_hw(struct tg3 *tp, bool silent)
{
	int i, err;

	tg3_disable_ints(tp);

	if (pci_channel_offline(tp->pdev)) {
		tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
		tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
		err = -ENODEV;
		goto err_no_dev;
	}

	tp->rx_mode &= ~RX_MODE_ENABLE;
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8893) err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8894) err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8895) err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8896) err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8897) err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8898) err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8899)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8900) err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8901) err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8902) err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8903) err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8904) err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8905) err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8906) err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8908) tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8909) tw32_f(MAC_MODE, tp->mac_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8910) udelay(40);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8912) tp->tx_mode &= ~TX_MODE_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8913) tw32_f(MAC_TX_MODE, tp->tx_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8915) for (i = 0; i < MAX_WAIT_CNT; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8916) udelay(100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8917) if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8918) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8919) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8920) if (i >= MAX_WAIT_CNT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8921) dev_err(&tp->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8922) "%s timed out, TX_MODE_ENABLE will not clear "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8923) "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8924) err |= -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8925) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8926)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8927) err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8928) err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8929) err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8930)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8931) tw32(FTQ_RESET, 0xffffffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8932) tw32(FTQ_RESET, 0x00000000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8933)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8934) err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8935) err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8936)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8937) err_no_dev:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8938) for (i = 0; i < tp->irq_cnt; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8939) struct tg3_napi *tnapi = &tp->napi[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8940) if (tnapi->hw_status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8941) memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8942) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8944) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8945) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8946)
/* Save the PCI command register before chip reset */
static void tg3_save_pci_state(struct tg3 *tp)
{
	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
}

/* Restore PCI state after chip reset */
static void tg3_restore_pci_state(struct tg3 *tp)
{
	u32 val;

	/* Re-enable indirect register accesses. */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Set MAX PCI retry to zero. */
	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
	    tg3_flag(tp, PCIX_MODE))
		val |= PCISTATE_RETRY_SAME_DMA;
	/* Allow reads and writes to the APE register and memory space. */
	if (tg3_flag(tp, ENABLE_APE))
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR |
		       PCISTATE_ALLOW_APE_PSPACE_WR;
	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);

	if (!tg3_flag(tp, PCI_EXPRESS)) {
		pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
				      tp->pci_cacheline_sz);
		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
				      tp->pci_lat_timer);
	}

	/* Make sure the PCI-X relaxed ordering bit is clear. */
	if (tg3_flag(tp, PCIX_MODE)) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		pcix_cmd &= ~PCI_X_CMD_ERO;
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	if (tg3_flag(tp, 5780_CLASS)) {
		/* Chip reset on the 5780 will clear the MSI enable bit,
		 * so we need to restore it.
		 */
		if (tg3_flag(tp, USING_MSI)) {
			u16 ctrl;

			pci_read_config_word(tp->pdev,
					     tp->msi_cap + PCI_MSI_FLAGS,
					     &ctrl);
			pci_write_config_word(tp->pdev,
					      tp->msi_cap + PCI_MSI_FLAGS,
					      ctrl | PCI_MSI_FLAGS_ENABLE);
			val = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
		}
	}
}

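/* Force the CPMU MAC clock override on 5717/5719/5720 parts so the core
 * runs at full speed; see the bootcode timeout comment in
 * tg3_chip_reset().  Undone by tg3_restore_clk() below.
 */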
static void tg3_override_clk(struct tg3 *tp)
{
	u32 val;

	switch (tg3_asic_rev(tp)) {
	case ASIC_REV_5717:
		val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
		tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
		     TG3_CPMU_MAC_ORIDE_ENABLE);
		break;

	case ASIC_REV_5719:
	case ASIC_REV_5720:
		tw32(TG3_CPMU_CLCK_ORIDE, CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
		break;

	default:
		return;
	}
}

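/* Clear the MAC clock override set by tg3_override_clk(), returning
 * clock control to the CPMU.
 */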
static void tg3_restore_clk(struct tg3 *tp)
{
	u32 val;

	switch (tg3_asic_rev(tp)) {
	case ASIC_REV_5717:
		val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
		tw32(TG3_CPMU_CLCK_ORIDE_ENABLE,
		     val & ~TG3_CPMU_MAC_ORIDE_ENABLE);
		break;

	case ASIC_REV_5719:
	case ASIC_REV_5720:
		val = tr32(TG3_CPMU_CLCK_ORIDE);
		tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
		break;

	default:
		return;
	}
}

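/* Issue a GRC core clock reset and bring the chip back up: PCI config
 * space is saved and restored around the reset, the 5701 write-flush
 * workaround is suspended while MMIO is unusable, irq handlers are
 * synchronized out (tp->lock is dropped and re-taken), and the ASF
 * enable state is re-probed from NVRAM afterwards.
 */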
/* tp->lock is held. */
static int tg3_chip_reset(struct tg3 *tp)
	__releases(tp->lock)
	__acquires(tp->lock)
{
	u32 val;
	void (*write_op)(struct tg3 *, u32, u32);
	int i, err;

	if (!pci_device_is_present(tp->pdev))
		return -ENODEV;

	tg3_nvram_lock(tp);

	tg3_ape_lock(tp, TG3_APE_LOCK_GRC);

	/* No matching tg3_nvram_unlock() after this because
	 * the chip reset below will undo the nvram lock.
	 */
	tp->nvram_lock_cnt = 0;

	/* The GRC_MISC_CFG core clock reset will clear the memory
	 * enable bit in PCI register 4 and the MSI enable bit
	 * on some chips, so we save the relevant registers here.
	 */
	tg3_save_pci_state(tp);

	if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
	    tg3_flag(tp, 5755_PLUS))
		tw32(GRC_FASTBOOT_PC, 0);

	/*
	 * We must avoid the readl() that normally takes place.
	 * It locks up machines, causes machine checks, and other
	 * fun things.  So temporarily disable the 5701
	 * hardware workaround while we do the reset.
	 */
	write_op = tp->write32;
	if (write_op == tg3_write_flush_reg32)
		tp->write32 = tg3_write32;

	/* Prevent the irq handler from reading or writing PCI registers
	 * during chip reset when the memory enable bit in the PCI command
	 * register may be cleared.  The chip does not generate interrupts
	 * at this time, but the irq handler may still be called due to irq
	 * sharing or irqpoll.
	 */
	tg3_flag_set(tp, CHIP_RESETTING);
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		if (tnapi->hw_status) {
			tnapi->hw_status->status = 0;
			tnapi->hw_status->status_tag = 0;
		}
		tnapi->last_tag = 0;
		tnapi->last_irq_tag = 0;
	}
	smp_mb();

	tg3_full_unlock(tp);

	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);

	tg3_full_lock(tp, 0);

	if (tg3_asic_rev(tp) == ASIC_REV_57780) {
		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
	}

	/* do the reset */
	val = GRC_MISC_CFG_CORECLK_RESET;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Force PCIe 1.0a mode */
		if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
		    !tg3_flag(tp, 57765_PLUS) &&
		    tr32(TG3_PCIE_PHY_TSTCTL) ==
		    (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
			tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);

		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
			tw32(GRC_MISC_CFG, (1 << 29));
			val |= (1 << 29);
		}
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
		tw32(GRC_VCPU_EXT_CTRL,
		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
	}

	/* Set the clock to the highest frequency to avoid timeouts.  With
	 * link aware mode, the clock speed could be slow and the bootcode
	 * may not complete within the expected time.  Override the clock to
	 * allow the bootcode to finish sooner and then restore it.
	 */
	tg3_override_clk(tp);

	/* Manage gphy power for all CPMU absent PCIe devices. */
	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;

	tw32(GRC_MISC_CFG, val);

	/* restore the 5701 hardware bug workaround write method */
	tp->write32 = write_op;

	/* Unfortunately, we have to delay before the PCI read back.
	 * Some 575X chips will not even respond to a PCI cfg access
	 * when the reset command is given to the chip.
	 *
	 * How do these hardware designers expect things to work
	 * properly if the PCI write is posted for a long period
	 * of time?  It is always necessary to have some method by
	 * which a register read back can occur to push the write
	 * out which does the reset.
	 *
	 * For most tg3 variants the trick below worked.
	 * Ho hum...
	 */
	udelay(120);

	/* Flush PCI posted writes.  The normal MMIO registers
	 * are inaccessible at this time so this is the only
	 * way to do this reliably (actually, this is no longer
	 * the case, see above).  I tried to use indirect
	 * register read/write but this upset some 5701 variants.
	 */
	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);

	udelay(120);

	if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
		u16 val16;

		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
			int j;
			u32 cfg_val;

			/* Wait for link training to complete. */
			for (j = 0; j < 5000; j++)
				udelay(100);

			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
			pci_write_config_dword(tp->pdev, 0xc4,
					       cfg_val | (1 << 15));
		}

		/* Clear the "no snoop" and "relaxed ordering" bits. */
		val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
		/*
		 * Older PCIe devices only support the 128 byte
		 * MPS setting.  Enforce the restriction.
		 */
		if (!tg3_flag(tp, CPMU_PRESENT))
			val16 |= PCI_EXP_DEVCTL_PAYLOAD;
		pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);

		/* Clear error status */
		pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
					   PCI_EXP_DEVSTA_CED |
					   PCI_EXP_DEVSTA_NFED |
					   PCI_EXP_DEVSTA_FED |
					   PCI_EXP_DEVSTA_URD);
	}

	tg3_restore_pci_state(tp);

	tg3_flag_clear(tp, CHIP_RESETTING);
	tg3_flag_clear(tp, ERROR_PROCESSED);

	val = 0;
	if (tg3_flag(tp, 5780_CLASS))
		val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
		tg3_stop_fw(tp);
		tw32(0x5000, 0x400);
	}

	if (tg3_flag(tp, IS_SSB_CORE)) {
		/* BCM4785: the internal ROM may be defective, so avoid
		 * relying on it by stopping the Rx RISC CPU, which is not
		 * required anyway.
		 */
		tg3_stop_fw(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
	}

	err = tg3_poll_fw(tp);
	if (err)
		return err;

	tw32(GRC_MODE, tp->grc_mode);

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
		val = tr32(0xc4);

		tw32(0xc4, val | (1 << 15));
	}

	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
	    tg3_asic_rev(tp) == ASIC_REV_5705) {
		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
		val = tp->mac_mode;
	} else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
		val = tp->mac_mode;
	} else
		val = 0;

	tw32_f(MAC_MODE, val);
	udelay(40);

	tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);

	tg3_mdio_start(tp);

	if (tg3_flag(tp, PCI_EXPRESS) &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
	    tg3_asic_rev(tp) != ASIC_REV_5785 &&
	    !tg3_flag(tp, 57765_PLUS)) {
		val = tr32(0x7c00);

		tw32(0x7c00, val | (1 << 25));
	}

	tg3_restore_clk(tp);

	/* Increase the core clock speed to fix the tx timeout issue seen on
	 * the 5762 at a 100Mbps link speed.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
		val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
		tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
		     TG3_CPMU_MAC_ORIDE_ENABLE);
	}

	/* Reprobe the ASF enable state. */
	tg3_flag_clear(tp, ENABLE_ASF);
	tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
			   TG3_PHYFLG_KEEP_LINK_ON_PWRDN);

	tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			tp->last_event_jiffies = jiffies;
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);

			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
			if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
				tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
			if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
				tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
		}
	}

	return 0;
}

static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
static void __tg3_set_rx_mode(struct net_device *);

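/* Stop firmware and all MAC/DMA activity, then reset the chip.  The
 * accumulated hardware statistics are folded into net_stats_prev and
 * estats_prev, and the stats block is cleared so the next sample
 * starts fresh.
 */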
/* tp->lock is held. */
static int tg3_halt(struct tg3 *tp, int kind, bool silent)
{
	int err;

	tg3_stop_fw(tp);

	tg3_write_sig_pre_reset(tp, kind);

	tg3_abort_hw(tp, silent);
	err = tg3_chip_reset(tp);

	__tg3_set_mac_addr(tp, false);

	tg3_write_sig_legacy(tp, kind);
	tg3_write_sig_post_reset(tp, kind);

	if (tp->hw_stats) {
		/* Save the stats across chip resets... */
		tg3_get_nstats(tp, &tp->net_stats_prev);
		tg3_get_estats(tp, &tp->estats_prev);

		/* And make sure the next sample is new data */
		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
	}

	return err;
}

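/* .ndo_set_mac_address handler.  When ASF firmware has claimed MAC
 * address 1 (it is non-zero and differs from address 0), address 1 is
 * left untouched so management traffic is not disturbed.
 */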
static int tg3_set_mac_addr(struct net_device *dev, void *p)
{
	struct tg3 *tp = netdev_priv(dev);
	struct sockaddr *addr = p;
	int err = 0;
	bool skip_mac_1 = false;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	if (!netif_running(dev))
		return 0;

	if (tg3_flag(tp, ENABLE_ASF)) {
		u32 addr0_high, addr0_low, addr1_high, addr1_low;

		addr0_high = tr32(MAC_ADDR_0_HIGH);
		addr0_low = tr32(MAC_ADDR_0_LOW);
		addr1_high = tr32(MAC_ADDR_1_HIGH);
		addr1_low = tr32(MAC_ADDR_1_LOW);

		/* Skip MAC addr 1 if ASF is using it. */
		if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
		    !(addr1_high == 0 && addr1_low == 0))
			skip_mac_1 = true;
	}
	spin_lock_bh(&tp->lock);
	__tg3_set_mac_addr(tp, skip_mac_1);
	__tg3_set_rx_mode(dev);
	spin_unlock_bh(&tp->lock);

	return err;
}

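/* Write one buffer descriptor info block into NIC SRAM: the 64-bit
 * host ring DMA address, the maxlen/flags word and, on pre-5705
 * chips, the NIC-local ring address.
 */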
/* tp->lock is held. */
static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
			   dma_addr_t mapping, u32 maxlen_flags,
			   u32 nic_addr)
{
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
		      ((u64) mapping >> 32));
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
		      ((u64) mapping & 0xffffffff));
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
		      maxlen_flags);

	if (!tg3_flag(tp, 5705_PLUS))
		tg3_write_mem(tp,
			      (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
			      nic_addr);
}

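/* Apply the ethtool TX coalescing parameters.  Without TSS only the
 * default host coalescing registers are written; with TSS each TX
 * queue gets its own VEC1-relative register triplet (stride 0x18) and
 * the defaults are zeroed.  Registers of unused vectors are always
 * cleared.
 */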
static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
{
	int i = 0;

	if (!tg3_flag(tp, ENABLE_TSS)) {
		tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
		tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
		tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
	} else {
		tw32(HOSTCC_TXCOL_TICKS, 0);
		tw32(HOSTCC_TXMAX_FRAMES, 0);
		tw32(HOSTCC_TXCOAL_MAXF_INT, 0);

		for (; i < tp->txq_cnt; i++) {
			u32 reg;

			reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
			tw32(reg, ec->tx_coalesce_usecs);
			reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
			tw32(reg, ec->tx_max_coalesced_frames);
			reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
			tw32(reg, ec->tx_max_coalesced_frames_irq);
		}
	}

	for (; i < tp->irq_max - 1; i++) {
		tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
	}
}

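/* Apply the ethtool RX coalescing parameters; the RSS counterpart of
 * tg3_coal_tx_init() above.
 */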
static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
{
	int i = 0;
	u32 limit = tp->rxq_cnt;

	if (!tg3_flag(tp, ENABLE_RSS)) {
		tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
		tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
		tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
		limit--;
	} else {
		tw32(HOSTCC_RXCOL_TICKS, 0);
		tw32(HOSTCC_RXMAX_FRAMES, 0);
		tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
	}

	for (; i < limit; i++) {
		u32 reg;

		reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
		tw32(reg, ec->rx_coalesce_usecs);
		reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
		tw32(reg, ec->rx_max_coalesced_frames);
		reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
		tw32(reg, ec->rx_max_coalesced_frames_irq);
	}

	for (; i < tp->irq_max - 1; i++) {
		tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
	}
}

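/* Push a complete ethtool_coalesce configuration to the chip.  The
 * statistics block tick register only exists on pre-5705 devices and
 * is forced to zero while the link is down.
 */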
static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
{
	tg3_coal_tx_init(tp, ec);
	tg3_coal_rx_init(tp, ec);

	if (!tg3_flag(tp, 5705_PLUS)) {
		u32 val = ec->stats_block_coalesce_usecs;

		tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
		tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);

		if (!tp->link_up)
			val = 0;

		tw32(HOSTCC_STAT_COAL_TICKS, val);
	}
}

/* tp->lock is held. */
static void tg3_tx_rcbs_disable(struct tg3 *tp)
{
	u32 txrcb, limit;

	/* Disable all transmit rings but the first. */
	if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
	else if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
	else if (tg3_flag(tp, 57765_CLASS) ||
		 tg3_asic_rev(tp) == ASIC_REV_5762)
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
	else
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;

	for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
	     txrcb < limit; txrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);
}

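/* Point each send RCB at its vector's host TX descriptor ring.  With
 * TSS, vector 0 carries no TX ring, so the first RCB goes to vector 1.
 */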
/* tp->lock is held. */
static void tg3_tx_rcbs_init(struct tg3 *tp)
{
	int i = 0;
	u32 txrcb = NIC_SRAM_SEND_RCB;

	if (tg3_flag(tp, ENABLE_TSS))
		i++;

	for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (!tnapi->tx_ring)
			continue;

		tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
			       (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
			       NIC_SRAM_TX_BUFFER_DESC);
	}
}

/* tp->lock is held. */
static void tg3_rx_ret_rcbs_disable(struct tg3 *tp)
{
	u32 rxrcb, limit;

	/* Disable all receive return rings but the first. */
	if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
	else if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
	else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
		 tg3_asic_rev(tp) == ASIC_REV_5762 ||
		 tg3_flag(tp, 57765_CLASS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
	else
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;

	for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
	     rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);
}

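/* Point each receive return RCB at its vector's host return ring.
 * With RSS, vector 0 owns no return ring and is skipped.
 */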
/* tp->lock is held. */
static void tg3_rx_ret_rcbs_init(struct tg3 *tp)
{
	int i = 0;
	u32 rxrcb = NIC_SRAM_RCV_RET_RCB;

	if (tg3_flag(tp, ENABLE_RSS))
		i++;

	for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (!tnapi->rx_rcb)
			continue;

		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
			       (tp->rx_ret_ring_mask + 1) <<
				BDINFO_FLAGS_MAXLEN_SHIFT, 0);
	}
}

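/* Return the rings to their power-on state: disable the extra send
 * and receive return RCBs, ack and zero every vector's mailboxes and
 * host status block, reprogram the status block DMA addresses, then
 * re-initialize the RCBs.
 */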
/* tp->lock is held. */
static void tg3_rings_reset(struct tg3 *tp)
{
	int i;
	u32 stblk;
	struct tg3_napi *tnapi = &tp->napi[0];

	tg3_tx_rcbs_disable(tp);

	tg3_rx_ret_rcbs_disable(tp);

	/* Disable interrupts */
	tw32_mailbox_f(tp->napi[0].int_mbox, 1);
	tp->napi[0].chk_msi_cnt = 0;
	tp->napi[0].last_rx_cons = 0;
	tp->napi[0].last_tx_cons = 0;

	/* Zero mailbox registers. */
	if (tg3_flag(tp, SUPPORT_MSIX)) {
		for (i = 1; i < tp->irq_max; i++) {
			tp->napi[i].tx_prod = 0;
			tp->napi[i].tx_cons = 0;
			if (tg3_flag(tp, ENABLE_TSS))
				tw32_mailbox(tp->napi[i].prodmbox, 0);
			tw32_rx_mbox(tp->napi[i].consmbox, 0);
			tw32_mailbox_f(tp->napi[i].int_mbox, 1);
			tp->napi[i].chk_msi_cnt = 0;
			tp->napi[i].last_rx_cons = 0;
			tp->napi[i].last_tx_cons = 0;
		}
		if (!tg3_flag(tp, ENABLE_TSS))
			tw32_mailbox(tp->napi[0].prodmbox, 0);
	} else {
		tp->napi[0].tx_prod = 0;
		tp->napi[0].tx_cons = 0;
		tw32_mailbox(tp->napi[0].prodmbox, 0);
		tw32_rx_mbox(tp->napi[0].consmbox, 0);
	}

	/* Make sure the NIC-based send BD rings are disabled. */
	if (!tg3_flag(tp, 5705_PLUS)) {
		u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
		for (i = 0; i < 16; i++)
			tw32_tx_mbox(mbox + i * 8, 0);
	}

	/* Clear the status block in RAM. */
	memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

	/* Set the status block DMA address */
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
	     ((u64) tnapi->status_mapping >> 32));
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
	     ((u64) tnapi->status_mapping & 0xffffffff));

	stblk = HOSTCC_STATBLCK_RING1;

	for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
		u64 mapping = (u64)tnapi->status_mapping;
		tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
		tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
		stblk += 8;

		/* Clear the status block in RAM. */
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
	}

	tg3_tx_rcbs_init(tp);
	tg3_rx_ret_rcbs_init(tp);
}

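/* Choose the RX buffer descriptor replenish thresholds.  The NIC-side
 * threshold is bounded by the per-chip BD cache size; the host-side
 * threshold is an eighth of the configured ring depth, so refills
 * begin well before the ring runs dry.
 */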
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9668) static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9669) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9670) u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9671)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9672) if (!tg3_flag(tp, 5750_PLUS) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9673) tg3_flag(tp, 5780_CLASS) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9674) tg3_asic_rev(tp) == ASIC_REV_5750 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9675) tg3_asic_rev(tp) == ASIC_REV_5752 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9676) tg3_flag(tp, 57765_PLUS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9677) bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9678) else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9679) tg3_asic_rev(tp) == ASIC_REV_5787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9680) bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9681) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9682) bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9683)
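	/* Set the replenish threshold to the smaller of half the NIC's
	 * BD cache and 1/8 of the host standard ring, bounded by the
	 * maximum number of BDs the chip can post at once.
	 */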
	nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
	host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);

	val = min(nic_rep_thresh, host_rep_thresh);
	tw32(RCVBDI_STD_THRESH, val);

	if (tg3_flag(tp, 57765_PLUS))
		tw32(STD_REPLENISH_LWM, bdcache_maxcnt);

	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		return;

	bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;

	host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);

	val = min(bdcache_maxcnt / 2, host_rep_thresh);
	tw32(RCVBDI_JUMBO_THRESH, val);

	if (tg3_flag(tp, 57765_PLUS))
		tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
}

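/* Bitwise CRC-32 in little-endian bit order (the Ethernet FCS
 * polynomial); the multicast filter below hashes on bits of the
 * inverted result.
 */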
static inline u32 calc_crc(unsigned char *buf, int len)
{
	u32 reg;
	u32 tmp;
	int j, k;

	reg = 0xffffffff;

	for (j = 0; j < len; j++) {
		reg ^= buf[j];

		for (k = 0; k < 8; k++) {
			tmp = reg & 0x01;

			reg >>= 1;

			if (tmp)
				reg ^= CRC32_POLY_LE;
		}
	}

	return ~reg;
}

static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
{
	/* accept or reject all multicast frames */
	tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
}

static void __tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 rx_mode;

	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
				  RX_MODE_KEEP_VLAN_TAG);

#if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
	 * flag clear.
	 */
	if (!tg3_flag(tp, ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= RX_MODE_PROMISC;
	} else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast. */
		tg3_set_multi(tp, 1);
	} else if (netdev_mc_empty(dev)) {
		/* Reject all multicast. */
		tg3_set_multi(tp, 0);
	} else {
		/* Accept one or more multicast addresses. */
		struct netdev_hw_addr *ha;
		u32 mc_filter[4] = { 0, };
		u32 regidx;
		u32 bit;
		u32 crc;

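		/* Hash each address into the 128-bit filter: the low
		 * 7 bits of the inverted CRC select one bit across the
		 * four 32-bit MAC hash registers (bits 6:5 pick the
		 * register, bits 4:0 the bit within it).
		 */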
		netdev_for_each_mc_addr(ha, dev) {
			crc = calc_crc(ha->addr, ETH_ALEN);
			bit = ~crc & 0x7f;
			regidx = (bit & 0x60) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		tw32(MAC_HASH_REG_0, mc_filter[0]);
		tw32(MAC_HASH_REG_1, mc_filter[1]);
		tw32(MAC_HASH_REG_2, mc_filter[2]);
		tw32(MAC_HASH_REG_3, mc_filter[3]);
	}

	if (netdev_uc_count(dev) > TG3_MAX_UCAST_ADDR(tp)) {
		rx_mode |= RX_MODE_PROMISC;
	} else if (!(dev->flags & IFF_PROMISC)) {
		/* Add all entries to the MAC address filter list. */
		int i = 0;
		struct netdev_hw_addr *ha;

		netdev_for_each_uc_addr(ha, dev) {
			__tg3_set_one_mac_addr(tp, ha->addr,
					       i + TG3_UCAST_ADDR_IDX(tp));
			i++;
		}
	}

	if (rx_mode != tp->rx_mode) {
		tp->rx_mode = rx_mode;
		tw32_f(MAC_RX_MODE, rx_mode);
		udelay(10);
	}
}

static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
{
	int i;

	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
		tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
}

static void tg3_rss_check_indir_tbl(struct tg3 *tp)
{
	int i;

	if (!tg3_flag(tp, SUPPORT_MSIX))
		return;

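	/* With a single RX queue RSS does no steering, so just zero
	 * the indirection table.
	 */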
	if (tp->rxq_cnt == 1) {
		memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
		return;
	}

	/* Validate table against current IRQ count */
	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
		if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
			break;
	}

	if (i != TG3_RSS_INDIR_TBL_SIZE)
		tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
}

static void tg3_rss_write_indir_tbl(struct tg3 *tp)
{
	int i = 0;
	u32 reg = MAC_RSS_INDIR_TBL_0;

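	/* Pack eight 4-bit table entries into each 32-bit register,
	 * most significant nibble first.
	 */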
	while (i < TG3_RSS_INDIR_TBL_SIZE) {
		u32 val = tp->rss_ind_tbl[i];
		i++;
		for (; i % 8; i++) {
			val <<= 4;
			val |= tp->rss_ind_tbl[i];
		}
		tw32(reg, val);
		reg += 4;
	}
}

static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
{
	if (tg3_asic_rev(tp) == ASIC_REV_5719)
		return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
	else
		return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
}

/* tp->lock is held. */
static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
{
	u32 val, rdmac_mode;
	int i, err, limit;
	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;

	tg3_disable_ints(tp);

	tg3_stop_fw(tp);

	tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);

	if (tg3_flag(tp, INIT_COMPLETE))
		tg3_abort_hw(tp, 1);

	if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
	    !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
		tg3_phy_pull_config(tp);
		tg3_eee_pull_config(tp, NULL);
		tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
	}

	/* Enable MAC control of LPI */
	if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
		tg3_setup_eee(tp);

	if (reset_phy)
		tg3_phy_reset(tp);

	err = tg3_chip_reset(tp);
	if (err)
		return err;

	tg3_write_sig_legacy(tp, RESET_KIND_INIT);

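	/* On 5784 AX silicon, take the CPMU out of the link-aware and
	 * link-idle power-save modes and run the 10Mb, link-aware and
	 * host-access MAC clocks at 6.25 MHz.
	 */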
	if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
		val = tr32(TG3_CPMU_CTRL);
		val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
		tw32(TG3_CPMU_CTRL, val);

		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
		val |= CPMU_LSPD_10MB_MACCLK_6_25;
		tw32(TG3_CPMU_LSPD_10MB_CLK, val);

		val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
		val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
		val |= CPMU_LNK_AWARE_MACCLK_6_25;
		tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);

		val = tr32(TG3_CPMU_HST_ACC);
		val &= ~CPMU_HST_ACC_MACCLK_MASK;
		val |= CPMU_HST_ACC_MACCLK_6_25;
		tw32(TG3_CPMU_HST_ACC, val);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_57780) {
		val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
		val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
		       PCIE_PWR_MGMT_L1_THRESH_4MS;
		tw32(PCIE_PWR_MGMT_THRESH, val);

		val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
		tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);

		tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);

		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
	}

	if (tg3_flag(tp, L1PLLPD_EN)) {
		u32 grc_mode = tr32(GRC_MODE);

		/* Access the lower 1K of PL PCIE block registers. */
		val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
		tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);

		val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
		tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
		     val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);

		tw32(GRC_MODE, grc_mode);
	}

	if (tg3_flag(tp, 57765_CLASS)) {
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
			u32 grc_mode = tr32(GRC_MODE);

			/* Access the lower 1K of PL PCIE block registers. */
			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
			tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);

			val = tr32(TG3_PCIE_TLDLPL_PORT +
				   TG3_PCIE_PL_LO_PHYCTL5);
			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
			     val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);

			tw32(GRC_MODE, grc_mode);
		}

		if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
			u32 grc_mode;

			/* Fix transmit hangs */
			val = tr32(TG3_CPMU_PADRNG_CTL);
			val |= TG3_CPMU_PADRNG_CTL_RDIV2;
			tw32(TG3_CPMU_PADRNG_CTL, val);

			grc_mode = tr32(GRC_MODE);

			/* Access the lower 1K of DL PCIE block registers. */
			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
			tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);

			val = tr32(TG3_PCIE_TLDLPL_PORT +
				   TG3_PCIE_DL_LO_FTSMAX);
			val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
			     val | TG3_PCIE_DL_LO_FTSMAX_VAL);

			tw32(GRC_MODE, grc_mode);
		}

		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
		val |= CPMU_LSPD_10MB_MACCLK_6_25;
		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
	}

	/* This works around an issue with Athlon chipsets on
	 * B3 tigon3 silicon. This bit has no effect on any
	 * other revision. But do not set this on PCI Express
	 * chips and don't even touch the clocks if the CPMU is present.
	 */
	if (!tg3_flag(tp, CPMU_PRESENT)) {
		if (!tg3_flag(tp, PCI_EXPRESS))
			tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
		tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
	    tg3_flag(tp, PCIX_MODE)) {
		val = tr32(TG3PCI_PCISTATE);
		val |= PCISTATE_RETRY_SAME_DMA;
		tw32(TG3PCI_PCISTATE, val);
	}

	if (tg3_flag(tp, ENABLE_APE)) {
		/* Allow reads and writes to the
		 * APE register and memory space.
		 */
		val = tr32(TG3PCI_PCISTATE);
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR |
		       PCISTATE_ALLOW_APE_PSPACE_WR;
		tw32(TG3PCI_PCISTATE, val);
	}

	if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
		/* Enable some hw fixes. */
		val = tr32(TG3PCI_MSI_DATA);
		val |= (1 << 26) | (1 << 28) | (1 << 29);
		tw32(TG3PCI_MSI_DATA, val);
	}

	/* Descriptor ring init may make accesses to the
	 * NIC SRAM area to set up the TX descriptors, so we
	 * can only do this after the hardware has been
	 * successfully reset.
	 */
	err = tg3_init_rings(tp);
	if (err)
		return err;

	if (tg3_flag(tp, 57765_PLUS)) {
		val = tr32(TG3PCI_DMA_RW_CTRL) &
		      ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
			val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
		if (!tg3_flag(tp, 57765_CLASS) &&
		    tg3_asic_rev(tp) != ASIC_REV_5717 &&
		    tg3_asic_rev(tp) != ASIC_REV_5762)
			val |= DMA_RWCTRL_TAGGED_STAT_WA;
		tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
	} else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
		   tg3_asic_rev(tp) != ASIC_REV_5761) {
		/* This value is determined during the probe time DMA
		 * engine test, tg3_test_dma.
		 */
		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

	tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
			  GRC_MODE_4X_NIC_SEND_RINGS |
			  GRC_MODE_NO_TX_PHDR_CSUM |
			  GRC_MODE_NO_RX_PHDR_CSUM);
	tp->grc_mode |= GRC_MODE_HOST_SENDBDS;

	/* Pseudo-header checksum is done by hardware logic and not
	 * the offload processors, so make the chip do the pseudo-
	 * header checksums on receive. For transmit it is more
	 * convenient to do the pseudo-header checksum in software
	 * as Linux does that on transmit for us in all cases.
	 */
	tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;

	val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
	if (tp->rxptpctl)
		tw32(TG3_RX_PTP_CTL,
		     tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);

	if (tg3_flag(tp, PTP_CAPABLE))
		val |= GRC_MODE_TIME_SYNC_ENABLE;

	tw32(GRC_MODE, tp->grc_mode | val);

	/* On some AMD platforms, MRRS is restricted to 4000 because of
	 * a south bridge limitation. As a workaround, the driver sets
	 * MRRS to 2048 instead of the default 4096.
	 */
	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
	    tp->pdev->subsystem_device == TG3PCI_SUBDEVICE_ID_DELL_5762) {
		val = tr32(TG3PCI_DEV_STATUS_CTRL) & ~MAX_READ_REQ_MASK;
		tw32(TG3PCI_DEV_STATUS_CTRL, val | MAX_READ_REQ_SIZE_2048);
	}

	/* Set up the timer prescaler register. The clock is always 66 MHz. */
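	/* 66 MHz / (65 + 1) gives a 1 MHz (1 usec) timer tick, assuming
	 * the hardware divides the clock by the prescaler value plus one.
	 */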
	val = tr32(GRC_MISC_CFG);
	val &= ~0xff;
	val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
	tw32(GRC_MISC_CFG, val);

	/* Initialize MBUF/DESC pool. */
	if (tg3_flag(tp, 5750_PLUS)) {
		/* Do nothing. */
	} else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
		tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
		if (tg3_asic_rev(tp) == ASIC_REV_5704)
			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
		else
			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
		tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
		tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
	} else if (tg3_flag(tp, TSO_CAPABLE)) {
		int fw_len;

		fw_len = tp->fw_len;
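		/* Round the firmware length up to a 128-byte boundary;
		 * the MBUF pool starts right after the TSO firmware.
		 */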
		fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
		tw32(BUFMGR_MB_POOL_ADDR,
		     NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
		tw32(BUFMGR_MB_POOL_SIZE,
		     NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
	}

	if (tp->dev->mtu <= ETH_DATA_LEN) {
		tw32(BUFMGR_MB_RDMA_LOW_WATER,
		     tp->bufmgr_config.mbuf_read_dma_low_water);
		tw32(BUFMGR_MB_MACRX_LOW_WATER,
		     tp->bufmgr_config.mbuf_mac_rx_low_water);
		tw32(BUFMGR_MB_HIGH_WATER,
		     tp->bufmgr_config.mbuf_high_water);
	} else {
		tw32(BUFMGR_MB_RDMA_LOW_WATER,
		     tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
		tw32(BUFMGR_MB_MACRX_LOW_WATER,
		     tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
		tw32(BUFMGR_MB_HIGH_WATER,
		     tp->bufmgr_config.mbuf_high_water_jumbo);
	}
	tw32(BUFMGR_DMA_LOW_WATER,
	     tp->bufmgr_config.dma_low_water);
	tw32(BUFMGR_DMA_HIGH_WATER,
	     tp->bufmgr_config.dma_high_water);

	val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
	if (tg3_asic_rev(tp) == ASIC_REV_5719)
		val |= BUFMGR_MODE_NO_TX_UNDERRUN;
	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
		val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
	tw32(BUFMGR_MODE, val);
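	/* Poll up to 2000 * 10 usec = 20 msec for the buffer manager
	 * to report itself enabled.
	 */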
	for (i = 0; i < 2000; i++) {
		if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
			break;
		udelay(10);
	}
	if (i >= 2000) {
		netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
		return -ENODEV;
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
		tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);

	tg3_setup_rxbd_thresholds(tp);

	/* Initialize TG3_BDINFO's at:
	 *  RCVDBDI_STD_BD:	standard eth size rx ring
	 *  RCVDBDI_JUMBO_BD:	jumbo frame rx ring
	 *  RCVDBDI_MINI_BD:	small frame rx ring (??? does not work)
	 *
	 * like so:
	 *  TG3_BDINFO_HOST_ADDR:	high/low parts of DMA address of ring
	 *  TG3_BDINFO_MAXLEN_FLAGS:	(rx max buffer size << 16) |
	 *				ring attribute flags
	 *  TG3_BDINFO_NIC_ADDR:	location of descriptors in nic SRAM
	 *
	 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
	 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
	 *
	 * The size of each ring is fixed in the firmware, but the location is
	 * configurable.
	 */
	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
	     ((u64) tpr->rx_std_mapping >> 32));
	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
	     ((u64) tpr->rx_std_mapping & 0xffffffff));
	if (!tg3_flag(tp, 5717_PLUS))
		tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
		     NIC_SRAM_RX_BUFFER_DESC);

	/* Disable the mini ring */
	if (!tg3_flag(tp, 5705_PLUS))
		tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
		     BDINFO_FLAGS_DISABLED);

	/* Program the jumbo buffer descriptor ring control
	 * blocks on those devices that have them.
	 */
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
	    (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {

		if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
			     ((u64) tpr->rx_jmb_mapping >> 32));
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
			     ((u64) tpr->rx_jmb_mapping & 0xffffffff));
			val = TG3_RX_JMB_RING_SIZE(tp) <<
			      BDINFO_FLAGS_MAXLEN_SHIFT;
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
			     val | BDINFO_FLAGS_USE_EXT_RECV);
			if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
			    tg3_flag(tp, 57765_CLASS) ||
			    tg3_asic_rev(tp) == ASIC_REV_5762)
				tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
				     NIC_SRAM_RX_JUMBO_BUFFER_DESC);
		} else {
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
			     BDINFO_FLAGS_DISABLED);
		}

		if (tg3_flag(tp, 57765_PLUS)) {
			val = TG3_RX_STD_RING_SIZE(tp);
			val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
			val |= (TG3_RX_STD_DMA_SZ << 2);
		} else
			val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
	} else
		val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;

	tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);

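	/* The rings were just filled by tg3_init_rings(), so seed the
	 * producer mailboxes with the number of buffers already posted.
	 */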
	tpr->rx_std_prod_idx = tp->rx_pending;
	tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);

	tpr->rx_jmb_prod_idx =
		tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
	tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);

	tg3_rings_reset(tp);

	/* Initialize MAC address and backoff seed. */
	__tg3_set_mac_addr(tp, false);

	/* MTU + ethernet header + FCS + optional VLAN tag */
	tw32(MAC_RX_MTU_SIZE,
	     tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);

	/* The slot time is changed by tg3_setup_phy if we
	 * run at gigabit with half duplex.
	 */
	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
	      (6 << TX_LENGTHS_IPG_SHIFT) |
	      (32 << TX_LENGTHS_SLOT_TIME_SHIFT);

	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		val |= tr32(MAC_TX_LENGTHS) &
		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
			TX_LENGTHS_CNT_DWN_VAL_MSK);

	tw32(MAC_TX_LENGTHS, val);

	/* Receive rules. */
	tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
	tw32(RCVLPC_CONFIG, 0x0181);

	/* Calculate the RDMAC_MODE setting early; we need it to determine
	 * the RCVLPC_STATE_ENABLE mask.
	 */
	rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
		      RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
		      RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
		      RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
		      RDMAC_MODE_LNGREAD_ENAB);

	if (tg3_asic_rev(tp) == ASIC_REV_5717)
		rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;

	if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780)
		rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
			      RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
			      RDMAC_MODE_MBUF_SBD_CRPT_ENAB;

	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
		if (tg3_flag(tp, TSO_CAPABLE) &&
		    tg3_asic_rev(tp) == ASIC_REV_5705) {
			rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
			   !tg3_flag(tp, IS_5788)) {
			rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
		}
	}

	if (tg3_flag(tp, PCI_EXPRESS))
		rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;

	if (tg3_asic_rev(tp) == ASIC_REV_57766) {
		tp->dma_limit = 0;
		if (tp->dev->mtu <= ETH_DATA_LEN) {
			rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
			tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
		}
	}

	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3))
		rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;

	if (tg3_flag(tp, 57765_PLUS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780)
		rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;

	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;

	if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
	    tg3_asic_rev(tp) == ASIC_REV_5784 ||
	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_PLUS)) {
		u32 tgtreg;

		if (tg3_asic_rev(tp) == ASIC_REV_5762)
			tgtreg = TG3_RDMA_RSRVCTRL_REG2;
		else
			tgtreg = TG3_RDMA_RSRVCTRL_REG;

		val = tr32(tgtreg);
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
		    tg3_asic_rev(tp) == ASIC_REV_5762) {
			val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
				 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
				 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
			val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
			       TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
			       TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
		}
		tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762) {
		u32 tgtreg;

		if (tg3_asic_rev(tp) == ASIC_REV_5762)
			tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
		else
			tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;

		val = tr32(tgtreg);
		tw32(tgtreg, val |
		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
	}

	/* Receive/send statistics. */
	if (tg3_flag(tp, 5750_PLUS)) {
		val = tr32(RCVLPC_STATS_ENABLE);
		val &= ~RCVLPC_STATSENAB_DACK_FIX;
		tw32(RCVLPC_STATS_ENABLE, val);
	} else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
		   tg3_flag(tp, TSO_CAPABLE)) {
		val = tr32(RCVLPC_STATS_ENABLE);
		val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
		tw32(RCVLPC_STATS_ENABLE, val);
	} else {
		tw32(RCVLPC_STATS_ENABLE, 0xffffff);
	}
	tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
	tw32(SNDDATAI_STATSENAB, 0xffffff);
	tw32(SNDDATAI_STATSCTRL,
	     (SNDDATAI_SCTRL_ENABLE |
	      SNDDATAI_SCTRL_FASTUPD));

	/* Set up the host coalescing engine. */
	tw32(HOSTCC_MODE, 0);
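	/* Wait up to 20 msec for the enable bit to clear, so the
	 * engine is idle before it is reprogrammed.
	 */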
	for (i = 0; i < 2000; i++) {
		if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
			break;
		udelay(10);
	}

	__tg3_set_coalesce(tp, &tp->coal);

	if (!tg3_flag(tp, 5705_PLUS)) {
		/* Status/statistics block address. See tg3_timer,
		 * the tg3_periodic_fetch_stats call there, and
		 * tg3_get_stats to see how this works for 5705/5750 chips.
		 */
		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
		     ((u64) tp->stats_mapping >> 32));
		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
		     ((u64) tp->stats_mapping & 0xffffffff));
		tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);

		tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);

		/* Clear statistics and status block memory areas */
		for (i = NIC_SRAM_STATS_BLK;
		     i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
		     i += sizeof(u32)) {
			tg3_write_mem(tp, i, 0);
			udelay(40);
		}
	}

	tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);

	tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
	tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
	if (!tg3_flag(tp, 5705_PLUS))
		tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);

	if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		/* Reset to prevent intermittently losing the first RX packet. */
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
	}

	tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
			MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
			MAC_MODE_FHDE_ENABLE;
	if (tg3_flag(tp, ENABLE_APE))
		tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
	if (!tg3_flag(tp, 5705_PLUS) &&
	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    tg3_asic_rev(tp) != ASIC_REV_5700)
		tp->mac_mode |= MAC_MODE_LINK_POLARITY;
	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
	udelay(40);

	/* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
	 * If TG3_FLAG_IS_NIC is zero, we should read the
	 * register to preserve the GPIO settings for LOMs. The GPIOs,
	 * whether used as inputs or outputs, are set by boot code after
	 * reset.
	 */
	if (!tg3_flag(tp, IS_NIC)) {
		u32 gpio_mask;

		gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
			    GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
			    GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;

		if (tg3_asic_rev(tp) == ASIC_REV_5752)
			gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
				     GRC_LCLCTRL_GPIO_OUTPUT3;

		if (tg3_asic_rev(tp) == ASIC_REV_5755)
			gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;

		tp->grc_local_ctrl &= ~gpio_mask;
		tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;

		/* GPIO1 must be driven high for eeprom write protect */
		if (tg3_flag(tp, EEPROM_WRITE_PROT))
			tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
					       GRC_LCLCTRL_GPIO_OUTPUT1);
	}
	tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
	udelay(100);

	if (tg3_flag(tp, USING_MSIX)) {
		val = tr32(MSGINT_MODE);
		val |= MSGINT_MODE_ENABLE;
		if (tp->irq_cnt > 1)
			val |= MSGINT_MODE_MULTIVEC_EN;
		if (!tg3_flag(tp, 1SHOT_MSI))
			val |= MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, val);
	}

	if (!tg3_flag(tp, 5705_PLUS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10479) tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10480) udelay(40);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10481) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10482)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10483) val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10484) WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10485) WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10486) WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10487) WDMAC_MODE_LNGREAD_ENAB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10488)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10489) if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10490) tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10491) if (tg3_flag(tp, TSO_CAPABLE) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10492) (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10493) tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10494) /* nothing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10495) } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10496) !tg3_flag(tp, IS_5788)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10497) val |= WDMAC_MODE_RX_ACCEL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10498) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10499) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10501) /* Enable host coalescing bug fix */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10502) if (tg3_flag(tp, 5755_PLUS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10503) val |= WDMAC_MODE_STATUS_TAG_FIX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10504)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10505) if (tg3_asic_rev(tp) == ASIC_REV_5785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10506) val |= WDMAC_MODE_BURST_ALL_DATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10507)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10508) tw32_f(WDMAC_MODE, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10509) udelay(40);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10510)
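/* Tune the PCI-X command register: limit the maximum memory read
 * byte count to 2K on the 5703 and 5704; the 5704 additionally has
 * its maximum outstanding split transactions field cleared.
 */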
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10511) if (tg3_flag(tp, PCIX_MODE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10512) u16 pcix_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10514) pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10515) &pcix_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10516) if (tg3_asic_rev(tp) == ASIC_REV_5703) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10517) pcix_cmd &= ~PCI_X_CMD_MAX_READ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10518) pcix_cmd |= PCI_X_CMD_READ_2K;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10519) } else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10520) pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10521) pcix_cmd |= PCI_X_CMD_READ_2K;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10522) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10523) pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10524) pcix_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10525) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10526)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10527) tw32_f(RDMAC_MODE, rdmac_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10528) udelay(40);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10529)
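/* 5719/5720: if any read DMA channel reports a length larger than
 * the current maximum MTU, set the LSO read DMA workaround bit and
 * flag it; the 5719_5720_RDMA_BUG flag is cleared again in
 * tg3_periodic_fetch_stats() once enough packets have been sent.
 */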
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10530) if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10531) tg3_asic_rev(tp) == ASIC_REV_5720) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10532) for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10533) if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10534) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10535) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10536) if (i < TG3_NUM_RDMA_CHANNELS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10537) val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10538) val |= tg3_lso_rd_dma_workaround_bit(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10539) tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10540) tg3_flag_set(tp, 5719_5720_RDMA_BUG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10541) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10542) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10543)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10544) tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10545) if (!tg3_flag(tp, 5705_PLUS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10546) tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10547)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10548) if (tg3_asic_rev(tp) == ASIC_REV_5761)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10549) tw32(SNDDATAC_MODE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10550) SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10551) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10552) tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10553)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10554) tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10555) tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10556) val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10557) if (tg3_flag(tp, LRG_PROD_RING_CAP))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10558) val |= RCVDBDI_MODE_LRG_RING_SZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10559) tw32(RCVDBDI_MODE, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10560) tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10561) if (tg3_flag(tp, HW_TSO_1) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10562) tg3_flag(tp, HW_TSO_2) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10563) tg3_flag(tp, HW_TSO_3))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10564) tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10565) val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10566) if (tg3_flag(tp, ENABLE_TSS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10567) val |= SNDBDI_MODE_MULTI_TXQ_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10568) tw32(SNDBDI_MODE, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10569) tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10570)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10571) if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10572) err = tg3_load_5701_a0_firmware_fix(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10573) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10574) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10575) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10576)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10577) if (tg3_asic_rev(tp) == ASIC_REV_57766) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10578) /* Ignore any errors from the firmware download. If the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10579) * download fails, the device will operate with EEE disabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10580) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10581) tg3_load_57766_firmware(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10582) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10583)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10584) if (tg3_flag(tp, TSO_CAPABLE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10585) err = tg3_load_tso_firmware(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10586) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10587) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10588) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10589)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10590) tp->tx_mode = TX_MODE_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10591)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10592) if (tg3_flag(tp, 5755_PLUS) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10593) tg3_asic_rev(tp) == ASIC_REV_5906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10594) tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10595)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10596) if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10597) tg3_asic_rev(tp) == ASIC_REV_5762) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10598) val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10599) tp->tx_mode &= ~val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10600) tp->tx_mode |= tr32(MAC_TX_MODE) & val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10601) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10602)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10603) tw32_f(MAC_TX_MODE, tp->tx_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10604) udelay(100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10605)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10606) if (tg3_flag(tp, ENABLE_RSS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10607) u32 rss_key[10];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10608)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10609) tg3_rss_write_indir_tbl(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10610)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10611) netdev_rss_key_fill(rss_key, 10 * sizeof(u32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10612)
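/* Program the 40-byte (10 x u32) RSS hash key, one word per
 * MAC_RSS_HASH_KEY register.
 */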
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10613) for (i = 0; i < 10; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10614) tw32(MAC_RSS_HASH_KEY_0 + i * 4, rss_key[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10615) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10616)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10617) tp->rx_mode = RX_MODE_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10618) if (tg3_flag(tp, 5755_PLUS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10619) tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10620)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10621) if (tg3_asic_rev(tp) == ASIC_REV_5762)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10622) tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10623)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10624) if (tg3_flag(tp, ENABLE_RSS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10625) tp->rx_mode |= RX_MODE_RSS_ENABLE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10626) RX_MODE_RSS_ITBL_HASH_BITS_7 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10627) RX_MODE_RSS_IPV6_HASH_EN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10628) RX_MODE_RSS_TCP_IPV6_HASH_EN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10629) RX_MODE_RSS_IPV4_HASH_EN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10630) RX_MODE_RSS_TCP_IPV4_HASH_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10631)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10632) tw32_f(MAC_RX_MODE, tp->rx_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10633) udelay(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10634)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10635) tw32(MAC_LED_CTRL, tp->led_ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10636)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10637) tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10638) if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10639) tw32_f(MAC_RX_MODE, RX_MODE_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10640) udelay(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10641) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10642) tw32_f(MAC_RX_MODE, tp->rx_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10643) udelay(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10644)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10645) if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10646) if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10647) !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10648) /* Set drive transmission level to 1.2V, but only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10649) /* if the signal pre-emphasis bit is not set. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10650) val = tr32(MAC_SERDES_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10651) val &= 0xfffff000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10652) val |= 0x880;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10653) tw32(MAC_SERDES_CFG, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10654) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10655) if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10656) tw32(MAC_SERDES_CFG, 0x616000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10657) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10658)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10659) /* Prevent chip from dropping frames when flow control
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10660) * is enabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10661) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10662) if (tg3_flag(tp, 57765_CLASS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10663) val = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10664) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10665) val = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10666) tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10667)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10668) if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10669) (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10670) /* Use hardware link auto-negotiation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10671) tg3_flag_set(tp, HW_AUTONEG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10672) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10673)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10674) if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10675) tg3_asic_rev(tp) == ASIC_REV_5714) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10676) u32 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10677)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10678) tmp = tr32(SERDES_RX_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10679) tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10680) tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10681) tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10682) tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10683) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10684)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10685) if (!tg3_flag(tp, USE_PHYLIB)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10686) if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10687) tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10688)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10689) err = tg3_setup_phy(tp, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10690) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10691) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10692)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10693) if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10694) !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10695) u32 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10696)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10697) /* Clear CRC stats. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10698) if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10699) tg3_writephy(tp, MII_TG3_TEST1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10700) tmp | MII_TG3_TEST1_CRC_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10701) tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10702) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10703) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10704) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10705)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10706) __tg3_set_rx_mode(tp->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10707)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10708) /* Initialize receive rules. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10709) tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10710) tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10711) tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10712) tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10713)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10714) if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10715) limit = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10716) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10717) limit = 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10718) if (tg3_flag(tp, ENABLE_ASF))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10719) limit -= 4;
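/* Clear the unused rule/value slots from slot (limit - 1) down to
 * slot 4. Rules 0 and 1 were programmed above; slots 2 and 3 are
 * deliberately left untouched (see the commented-out writes below).
 */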
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10720) switch (limit) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10721) case 16:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10722) tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10723) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10724) case 15:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10725) tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10726) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10727) case 14:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10728) tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10729) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10730) case 13:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10731) tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10732) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10733) case 12:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10734) tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10735) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10736) case 11:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10737) tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10738) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10739) case 10:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10740) tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10741) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10742) case 9:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10743) tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10744) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10745) case 8:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10746) tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10747) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10748) case 7:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10749) tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10750) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10751) case 6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10752) tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10753) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10754) case 5:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10755) tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10756) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10757) case 4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10758) /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10759) case 3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10760) /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10761) case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10762) case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10763)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10764) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10765) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10766) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10767)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10768) if (tg3_flag(tp, ENABLE_APE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10769) /* Write our heartbeat update interval to APE. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10770) tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10771) APE_HOST_HEARTBEAT_INT_5SEC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10772)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10773) tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10774)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10775) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10776) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10777)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10778) /* Called at device open time to get the chip ready for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10779) * packet processing. Invoked with tp->lock held.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10780) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10781) static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10782) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10783) /* Chip may have been just powered on. If so, the boot code may still
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10784) * be running initialization. Wait for it to finish to avoid races in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10785) * accessing the hardware.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10786) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10787) tg3_enable_register_access(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10788) tg3_poll_fw(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10789)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10790) tg3_switch_clocks(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10791)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10792) tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10794) return tg3_reset_hw(tp, reset_phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10795) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10797) #ifdef CONFIG_TIGON3_HWMON
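/* Scan the APE scratchpad for OCIR records; any record lacking the
 * signature magic or not marked active is zeroed out.
 */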
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10798) static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10799) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10800) u32 off, len = TG3_OCIR_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10801) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10802)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10803) for (i = 0, off = 0; i < TG3_SD_NUM_RECS; i++, ocir++, off += len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10804) tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10805)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10806) if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10807) !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10808) memset(ocir, 0, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10809) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10810) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10812) /* sysfs attributes for hwmon */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10813) static ssize_t tg3_show_temp(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10814) struct device_attribute *devattr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10815) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10816) struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10817) struct tg3 *tp = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10818) u32 temperature;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10820) spin_lock_bh(&tp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10821) tg3_ape_scratchpad_read(tp, &temperature, attr->index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10822) sizeof(temperature));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10823) spin_unlock_bh(&tp->lock);
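/* hwmon sysfs expects temperatures in millidegrees Celsius, so the
 * APE reading (whole degrees) is scaled by 1000 below.
 */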
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10824) return sprintf(buf, "%u\n", temperature * 1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10825) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10828) static SENSOR_DEVICE_ATTR(temp1_input, 0444, tg3_show_temp, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10829) TG3_TEMP_SENSOR_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10830) static SENSOR_DEVICE_ATTR(temp1_crit, 0444, tg3_show_temp, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10831) TG3_TEMP_CAUTION_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10832) static SENSOR_DEVICE_ATTR(temp1_max, 0444, tg3_show_temp, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10833) TG3_TEMP_MAX_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10835) static struct attribute *tg3_attrs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10836) &sensor_dev_attr_temp1_input.dev_attr.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10837) &sensor_dev_attr_temp1_crit.dev_attr.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10838) &sensor_dev_attr_temp1_max.dev_attr.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10839) NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10840) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10841) ATTRIBUTE_GROUPS(tg3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10842)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10843) static void tg3_hwmon_close(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10844) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10845) if (tp->hwmon_dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10846) hwmon_device_unregister(tp->hwmon_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10847) tp->hwmon_dev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10848) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10849) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10851) static void tg3_hwmon_open(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10852) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10853) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10854) u32 size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10855) struct pci_dev *pdev = tp->pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10856) struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10858) tg3_sd_scan_scratchpad(tp, ocirs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10860) for (i = 0; i < TG3_SD_NUM_RECS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10861) if (!ocirs[i].src_data_length)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10862) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10864) size += ocirs[i].src_hdr_length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10865) size += ocirs[i].src_data_length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10866) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10868) if (!size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10869) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10871) tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10872) tp, tg3_groups);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10873) if (IS_ERR(tp->hwmon_dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10874) tp->hwmon_dev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10875) dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10876) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10877) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10878) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10879) static inline void tg3_hwmon_close(struct tg3 *tp) { }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10880) static inline void tg3_hwmon_open(struct tg3 *tp) { }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10881) #endif /* CONFIG_TIGON3_HWMON */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10883)
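/* Accumulate a 32-bit hardware counter into a 64-bit (high/low)
 * software statistic. If the low word wraps around during the
 * addition (detected by low < __val afterwards), carry into high.
 */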
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10884) #define TG3_STAT_ADD32(PSTAT, REG) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10885) do { u32 __val = tr32(REG); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10886) (PSTAT)->low += __val; \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10887) if ((PSTAT)->low < __val) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10888) (PSTAT)->high += 1; \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10889) } while (0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10891) static void tg3_periodic_fetch_stats(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10892) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10893) struct tg3_hw_stats *sp = tp->hw_stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10895) if (!tp->link_up)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10896) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10898) TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10899) TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10900) TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10901) TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10902) TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10903) TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10904) TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10905) TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10906) TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10907) TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10908) TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10909) TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10910) TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10911) if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10912) (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10913) sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10914) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10916) val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10917) val &= ~tg3_lso_rd_dma_workaround_bit(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10918) tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10919) tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10920) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10922) TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10923) TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10924) TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10925) TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10926) TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10927) TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10928) TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10929) TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10930) TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10931) TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10932) TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10933) TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10934) TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10935) TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10936)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10937) TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10938) if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10939) tg3_asic_rev(tp) != ASIC_REV_5762 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10940) tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10941) tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10942) TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10943) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10944) u32 val = tr32(HOSTCC_FLOW_ATTN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10945) val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10946) if (val) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10947) tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10948) sp->rx_discards.low += val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10949) if (sp->rx_discards.low < val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10950) sp->rx_discards.high += 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10951) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10952) sp->mbuf_lwm_thresh_hit = sp->rx_discards;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10953) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10954) TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10955) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10956)
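/* Work around lost MSI interrupts: if a vector still has work pending
 * but its consumer indices have not moved since the last timer tick,
 * assume the MSI was missed and, after one grace tick, call the
 * handler directly to restart processing.
 */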
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10957) static void tg3_chk_missed_msi(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10958) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10959) u32 i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10961) for (i = 0; i < tp->irq_cnt; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10962) struct tg3_napi *tnapi = &tp->napi[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10963)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10964) if (tg3_has_work(tnapi)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10965) if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10966) tnapi->last_tx_cons == tnapi->tx_cons) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10967) if (tnapi->chk_msi_cnt < 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10968) tnapi->chk_msi_cnt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10969) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10970) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10971) tg3_msi(0, tnapi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10972) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10973) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10974) tnapi->chk_msi_cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10975) tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10976) tnapi->last_tx_cons = tnapi->tx_cons;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10977) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10978) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10980) static void tg3_timer(struct timer_list *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10981) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10982) struct tg3 *tp = from_timer(tp, t, timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10983)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10984) spin_lock(&tp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10985)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10986) if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10987) spin_unlock(&tp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10988) goto restart_timer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10989) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10990)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10991) if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10992) tg3_flag(tp, 57765_CLASS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10993) tg3_chk_missed_msi(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10994)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10995) if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10996) /* BCM4785: Flush posted writes from GbE to host memory. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10997) tr32(HOSTCC_MODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10998) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11000) if (!tg3_flag(tp, TAGGED_STATUS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11001) /* All of this garbage is because, when using non-tagged
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11002) * IRQ status, the mailbox/status_block protocol the chip
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11003) * uses with the CPU is race prone.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11004) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11005) if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11006) tw32(GRC_LOCAL_CTRL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11007) tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11008) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11009) tw32(HOSTCC_MODE, tp->coalesce_mode |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11010) HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11011) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11012)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11013) if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11014) spin_unlock(&tp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11015) tg3_reset_task_schedule(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11016) goto restart_timer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11017) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11018) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11019)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11020) /* This part only runs once per second. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11021) if (!--tp->timer_counter) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11022) if (tg3_flag(tp, 5705_PLUS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11023) tg3_periodic_fetch_stats(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11025) if (tp->setlpicnt && !--tp->setlpicnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11026) tg3_phy_eee_enable(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11028) if (tg3_flag(tp, USE_LINKCHG_REG)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11029) u32 mac_stat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11030) int phy_event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11031)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11032) mac_stat = tr32(MAC_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11033)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11034) phy_event = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11035) if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11036) if (mac_stat & MAC_STATUS_MI_INTERRUPT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11037) phy_event = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11038) } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11039) phy_event = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11040)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11041) if (phy_event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11042) tg3_setup_phy(tp, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11043) } else if (tg3_flag(tp, POLL_SERDES)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11044) u32 mac_stat = tr32(MAC_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11045) int need_setup = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11047) if (tp->link_up &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11048) (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11049) need_setup = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11050) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11051) if (!tp->link_up &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11052) (mac_stat & (MAC_STATUS_PCS_SYNCED |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11053) MAC_STATUS_SIGNAL_DET))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11054) need_setup = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11055) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11056) if (need_setup) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11057) if (!tp->serdes_counter) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11058) tw32_f(MAC_MODE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11059) (tp->mac_mode &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11060) ~MAC_MODE_PORT_MODE_MASK));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11061) udelay(40);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11062) tw32_f(MAC_MODE, tp->mac_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11063) udelay(40);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11064) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11065) tg3_setup_phy(tp, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11066) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11067) } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11068) tg3_flag(tp, 5780_CLASS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11069) tg3_serdes_parallel_detect(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11070) } else if (tg3_flag(tp, POLL_CPMU_LINK)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11071) u32 cpmu = tr32(TG3_CPMU_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11072) bool link_up = !((cpmu & TG3_CPMU_STATUS_LINK_MASK) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11073) TG3_CPMU_STATUS_LINK_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11074)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11075) if (link_up != tp->link_up)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11076) tg3_setup_phy(tp, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11077) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11079) tp->timer_counter = tp->timer_multiplier;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11080) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11082) /* Heartbeat is only sent once every 2 seconds.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11083) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11084) * The heartbeat is to tell the ASF firmware that the host
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11085) * driver is still alive. In the event that the OS crashes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11086) * ASF needs to reset the hardware to free up the FIFO space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11087) * that may be filled with rx packets destined for the host.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11088) * If the FIFO is full, ASF will no longer function properly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11089) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11090) * Unintended resets have been reported on real-time kernels
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11091) * where the timer doesn't run on time. Netpoll will also have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11092) * the same problem.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11093) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11094) * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11095) * to check the ring condition when the heartbeat is expiring
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11096) * before doing the reset. This will prevent most unintended
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11097) * resets.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11098) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11099) if (!--tp->asf_counter) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11100) if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11101) tg3_wait_for_event_ack(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11103) tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11104) FWCMD_NICDRV_ALIVE3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11105) tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11106) tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11107) TG3_FW_UPDATE_TIMEOUT_SEC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11109) tg3_generate_fw_event(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11110) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11111) tp->asf_counter = tp->asf_multiplier;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11112) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11114) /* Update the APE heartbeat every 5 seconds. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11115) tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11117) spin_unlock(&tp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11119) restart_timer:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11120) tp->timer.expires = jiffies + tp->timer_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11121) add_timer(&tp->timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11122) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11123)
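/* The timer fires every tp->timer_offset jiffies: once per second
 * with tagged status (except on 5717/57765-class chips), otherwise
 * ten times per second. timer_multiplier converts ticks back into
 * the once-per-second work in tg3_timer(); asf_multiplier does the
 * same for the ASF heartbeat period.
 */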
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11124) static void tg3_timer_init(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11125) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11126) if (tg3_flag(tp, TAGGED_STATUS) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11127) tg3_asic_rev(tp) != ASIC_REV_5717 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11128) !tg3_flag(tp, 57765_CLASS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11129) tp->timer_offset = HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11130) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11131) tp->timer_offset = HZ / 10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11133) BUG_ON(tp->timer_offset > HZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11135) tp->timer_multiplier = (HZ / tp->timer_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11136) tp->asf_multiplier = (HZ / tp->timer_offset) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11137) TG3_FW_UPDATE_FREQ_SEC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11139) timer_setup(&tp->timer, tg3_timer, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11140) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11142) static void tg3_timer_start(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11143) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11144) tp->asf_counter = tp->asf_multiplier;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11145) tp->timer_counter = tp->timer_multiplier;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11146)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11147) tp->timer.expires = jiffies + tp->timer_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11148) add_timer(&tp->timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11149) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11150)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11151) static void tg3_timer_stop(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11152) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11153) del_timer_sync(&tp->timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11154) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11156) /* Restart hardware after configuration changes, self-test, etc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11157) * Invoked with tp->lock held.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11158) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11159) static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11160) __releases(tp->lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11161) __acquires(tp->lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11162) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11163) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11165) err = tg3_init_hw(tp, reset_phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11166) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11167) netdev_err(tp->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11168) "Failed to re-initialize device, aborting\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11169) tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11170) tg3_full_unlock(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11171) tg3_timer_stop(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11172) tp->irq_sync = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11173) tg3_napi_enable(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11174) dev_close(tp->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11175) tg3_full_lock(tp, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11176) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11177) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11178) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11180) static void tg3_reset_task(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11181) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11182) struct tg3 *tp = container_of(work, struct tg3, reset_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11183) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11185) rtnl_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11186) tg3_full_lock(tp, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11188) if (!netif_running(tp->dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11189) tg3_flag_clear(tp, RESET_TASK_PENDING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11190) tg3_full_unlock(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11191) rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11192) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11193) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11195) tg3_full_unlock(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11197) tg3_phy_stop(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11198)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11199) tg3_netif_stop(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11201) tg3_full_lock(tp, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11203) if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11204) tp->write32_tx_mbox = tg3_write32_tx_mbox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11205) tp->write32_rx_mbox = tg3_write_flush_reg32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11206) tg3_flag_set(tp, MBOX_WRITE_REORDER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11207) tg3_flag_clear(tp, TX_RECOVERY_PENDING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11208) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11210) tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11211) err = tg3_init_hw(tp, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11212) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11213) tg3_full_unlock(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11214) tp->irq_sync = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11215) tg3_napi_enable(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11216) /* Clear this flag so that tg3_reset_task_cancel() will not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11217) * call cancel_work_sync() and wait forever.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11218) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11219) tg3_flag_clear(tp, RESET_TASK_PENDING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11220) dev_close(tp->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11221) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11222) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11223)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11224) tg3_netif_start(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11225)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11226) tg3_full_unlock(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11228) if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11229) tg3_phy_start(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11231) tg3_flag_clear(tp, RESET_TASK_PENDING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11232) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11233) rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11234) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11235)
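^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11235) /* Request the interrupt for one NAPI context. With multiple vectors,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11235) * each one is labelled "<dev>-txrx-N", "<dev>-tx-N" or "<dev>-rx-N"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11235) * according to the rings it services. MSI/MSI-X vectors get a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11235) * dedicated handler; legacy INTx is requested as a shared interrupt.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11235) */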
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11236) static int tg3_request_irq(struct tg3 *tp, int irq_num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11237) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11238) irq_handler_t fn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11239) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11240) char *name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11241) struct tg3_napi *tnapi = &tp->napi[irq_num];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11243) if (tp->irq_cnt == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11244) name = tp->dev->name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11245) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11246) name = &tnapi->irq_lbl[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11247) if (tnapi->tx_buffers && tnapi->rx_rcb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11248) snprintf(name, IFNAMSIZ,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11249) "%s-txrx-%d", tp->dev->name, irq_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11250) else if (tnapi->tx_buffers)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11251) snprintf(name, IFNAMSIZ,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11252) "%s-tx-%d", tp->dev->name, irq_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11253) else if (tnapi->rx_rcb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11254) snprintf(name, IFNAMSIZ,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11255) "%s-rx-%d", tp->dev->name, irq_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11256) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11257) snprintf(name, IFNAMSIZ,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11258) "%s-%d", tp->dev->name, irq_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11259) name[IFNAMSIZ-1] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11260) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11261)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11262) if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11263) fn = tg3_msi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11264) if (tg3_flag(tp, 1SHOT_MSI))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11265) fn = tg3_msi_1shot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11266) flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11267) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11268) fn = tg3_interrupt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11269) if (tg3_flag(tp, TAGGED_STATUS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11270) fn = tg3_interrupt_tagged;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11271) flags = IRQF_SHARED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11272) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11274) return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11275) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11276)
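^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11276) /* Check that the device can actually raise an interrupt: install the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11276) * minimal tg3_test_isr handler, kick the coalescing engine so an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11276) * interrupt fires, and poll the interrupt mailbox for up to ~50ms for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11276) * evidence that the handler ran. The regular handler is restored
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11276) * before returning.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11276) */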
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11277) static int tg3_test_interrupt(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11278) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11279) struct tg3_napi *tnapi = &tp->napi[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11280) struct net_device *dev = tp->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11281) int err, i, intr_ok = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11282) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11283)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11284) if (!netif_running(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11285) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11287) tg3_disable_ints(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11288)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11289) free_irq(tnapi->irq_vec, tnapi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11291) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11292) * Turn off MSI one-shot mode. Otherwise this test has no
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11293) * observable way to know whether the interrupt was delivered.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11294) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11295) if (tg3_flag(tp, 57765_PLUS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11296) val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11297) tw32(MSGINT_MODE, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11298) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11299)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11300) err = request_irq(tnapi->irq_vec, tg3_test_isr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11301) IRQF_SHARED, dev->name, tnapi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11302) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11303) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11305) tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11306) tg3_enable_ints(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11307)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11308) tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11309) tnapi->coal_now);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11310)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11311) for (i = 0; i < 5; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11312) u32 int_mbox, misc_host_ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11313)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11314) int_mbox = tr32_mailbox(tnapi->int_mbox);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11315) misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11317) if ((int_mbox != 0) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11318) (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11319) intr_ok = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11320) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11321) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11322)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11323) if (tg3_flag(tp, 57765_PLUS) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11324) tnapi->hw_status->status_tag != tnapi->last_tag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11325) tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11327) msleep(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11328) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11329)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11330) tg3_disable_ints(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11331)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11332) free_irq(tnapi->irq_vec, tnapi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11333)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11334) err = tg3_request_irq(tp, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11335)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11336) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11337) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11338)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11339) if (intr_ok) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11340) /* Re-enable MSI one-shot mode. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11341) if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11342) val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11343) tw32(MSGINT_MODE, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11344) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11345) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11346) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11347)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11348) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11349) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11350)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11351) /* Returns 0 if the MSI test succeeds, or if the MSI test fails but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11352) * INTx mode is successfully restored.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11353) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11354) static int tg3_test_msi(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11355) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11356) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11357) u16 pci_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11358)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11359) if (!tg3_flag(tp, USING_MSI))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11360) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11362) /* Turn off SERR reporting in case MSI terminates with Master
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11363) * Abort.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11364) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11365) pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11366) pci_write_config_word(tp->pdev, PCI_COMMAND,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11367) pci_cmd & ~PCI_COMMAND_SERR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11368)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11369) err = tg3_test_interrupt(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11371) pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11372)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11373) if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11374) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11375)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11376) /* other failures */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11377) if (err != -EIO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11378) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11379)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11380) /* MSI test failed, go back to INTx mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11381) netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11382) "to INTx mode. Please report this failure to the PCI "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11383) "maintainer and include system chipset information\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11384)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11385) free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11386)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11387) pci_disable_msi(tp->pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11388)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11389) tg3_flag_clear(tp, USING_MSI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11390) tp->napi[0].irq_vec = tp->pdev->irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11391)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11392) err = tg3_request_irq(tp, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11393) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11394) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11395)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11396) /* Need to reset the chip because the MSI cycle may have terminated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11397) * with Master Abort.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11398) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11399) tg3_full_lock(tp, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11401) tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11402) err = tg3_init_hw(tp, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11404) tg3_full_unlock(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11405)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11406) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11407) free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11409) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11410) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11411)
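^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11411) /* Load the firmware image named in tp->fw_needed and validate the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11411) * length recorded in its header against the size of the blob that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11411) * was actually loaded.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11411) */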
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11412) static int tg3_request_firmware(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11413) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11414) const struct tg3_firmware_hdr *fw_hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11415)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11416) if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11417) netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11418) tp->fw_needed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11419) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11420) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11421)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11422) fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11423)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11424) /* Firmware blob starts with version numbers, followed by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11425) * start address and _full_ length including BSS sections
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11426) * (which must be at least as long as the actual data, of course).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11427) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11428)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11429) tp->fw_len = be32_to_cpu(fw_hdr->len); /* includes bss */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11430) if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11431) netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11432) tp->fw_len, tp->fw_needed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11433) release_firmware(tp->fw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11434) tp->fw = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11435) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11436) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11437)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11438) /* We no longer need firmware; we have it. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11439) tp->fw_needed = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11440) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11441) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11442)
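^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11442) /* Number of interrupt vectors to request: enough for the larger of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11442) * the RX and TX ring counts, plus one in multiqueue mode for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11442) * vector that only handles link and other non-ring events, capped at
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11442) * tp->irq_max.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11442) */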
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11443) static u32 tg3_irq_count(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11444) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11445) u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11446)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11447) if (irq_cnt > 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11448) /* We want as many rx rings enabled as there are cpus.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11449) * In multiqueue MSI-X mode, the first MSI-X vector
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11450) * only deals with link interrupts, etc., so we add
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11451) * one to the number of vectors we are requesting.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11452) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11453) irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11454) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11455)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11456) return irq_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11457) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11458)
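^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11458) /* Try to put the device into MSI-X mode. One vector is requested per
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11458) * queue (plus the extra link vector); if the PCI core grants fewer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11458) * vectors than requested, the RX/TX queue counts are scaled down to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11458) * match. Returns false on failure so the caller can fall back to MSI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11458) * or INTx.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11458) */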
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11459) static bool tg3_enable_msix(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11460) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11461) int i, rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11462) struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11463)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11464) tp->txq_cnt = tp->txq_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11465) tp->rxq_cnt = tp->rxq_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11466) if (!tp->rxq_cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11467) tp->rxq_cnt = netif_get_num_default_rss_queues();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11468) if (tp->rxq_cnt > tp->rxq_max)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11469) tp->rxq_cnt = tp->rxq_max;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11470)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11471) /* Disable multiple TX rings by default. Simple round-robin hardware
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11472) * scheduling of the TX rings can cause starvation of rings with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11473) * small packets when other rings have TSO or jumbo packets.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11474) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11475) if (!tp->txq_req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11476) tp->txq_cnt = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11477)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11478) tp->irq_cnt = tg3_irq_count(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11479)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11480) for (i = 0; i < tp->irq_max; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11481) msix_ent[i].entry = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11482) msix_ent[i].vector = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11483) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11484)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11485) rc = pci_enable_msix_range(tp->pdev, msix_ent, 1, tp->irq_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11486) if (rc < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11487) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11488) } else if (rc < tp->irq_cnt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11489) netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11490) tp->irq_cnt, rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11491) tp->irq_cnt = rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11492) tp->rxq_cnt = max(rc - 1, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11493) if (tp->txq_cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11494) tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11495) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11496)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11497) for (i = 0; i < tp->irq_max; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11498) tp->napi[i].irq_vec = msix_ent[i].vector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11499)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11500) if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11501) pci_disable_msix(tp->pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11502) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11503) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11504)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11505) if (tp->irq_cnt == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11506) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11507)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11508) tg3_flag_set(tp, ENABLE_RSS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11509)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11510) if (tp->txq_cnt > 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11511) tg3_flag_set(tp, ENABLE_TSS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11512)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11513) netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11514)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11515) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11516) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11517)
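^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11517) /* Choose the interrupt mode for this device: MSI-X when supported
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11517) * and successfully enabled, then MSI, with legacy INTx as the final
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11517) * fallback, and program MSGINT_MODE to match.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11517) */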
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11518) static void tg3_ints_init(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11519) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11520) if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11521) !tg3_flag(tp, TAGGED_STATUS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11522) /* All MSI-supporting chips should support tagged
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11523) * status. Warn and fall back to INTx if this one does not.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11524) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11525) netdev_warn(tp->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11526) "MSI without TAGGED_STATUS? Not using MSI\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11527) goto defcfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11528) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11529)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11530) if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11531) tg3_flag_set(tp, USING_MSIX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11532) else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11533) tg3_flag_set(tp, USING_MSI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11535) if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11536) u32 msi_mode = tr32(MSGINT_MODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11537) if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11538) msi_mode |= MSGINT_MODE_MULTIVEC_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11539) if (!tg3_flag(tp, 1SHOT_MSI))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11540) msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11541) tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11542) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11543) defcfg:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11544) if (!tg3_flag(tp, USING_MSIX)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11545) tp->irq_cnt = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11546) tp->napi[0].irq_vec = tp->pdev->irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11547) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11548)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11549) if (tp->irq_cnt == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11550) tp->txq_cnt = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11551) tp->rxq_cnt = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11552) netif_set_real_num_tx_queues(tp->dev, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11553) netif_set_real_num_rx_queues(tp->dev, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11554) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11555) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11556)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11557) static void tg3_ints_fini(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11558) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11559) if (tg3_flag(tp, USING_MSIX))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11560) pci_disable_msix(tp->pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11561) else if (tg3_flag(tp, USING_MSI))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11562) pci_disable_msi(tp->pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11563) tg3_flag_clear(tp, USING_MSI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11564) tg3_flag_clear(tp, USING_MSIX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11565) tg3_flag_clear(tp, ENABLE_RSS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11566) tg3_flag_clear(tp, ENABLE_TSS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11567) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11568)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11569) static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11570) bool init)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11571) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11572) struct net_device *dev = tp->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11573) int i, err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11574)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11575) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11576) * Set up interrupts first so we know how
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11577) * many NAPI resources to allocate.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11578) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11579) tg3_ints_init(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11580)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11581) tg3_rss_check_indir_tbl(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11582)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11583) /* The placement of this call is tied
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11584) * to the setup and use of Host TX descriptors.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11585) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11586) err = tg3_alloc_consistent(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11587) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11588) goto out_ints_fini;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11589)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11590) tg3_napi_init(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11591)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11592) tg3_napi_enable(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11593)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11594) for (i = 0; i < tp->irq_cnt; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11595) err = tg3_request_irq(tp, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11596) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11597) for (i--; i >= 0; i--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11598) struct tg3_napi *tnapi = &tp->napi[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11599)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11600) free_irq(tnapi->irq_vec, tnapi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11601) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11602) goto out_napi_fini;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11603) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11604) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11605)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11606) tg3_full_lock(tp, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11607)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11608) if (init)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11609) tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11610)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11611) err = tg3_init_hw(tp, reset_phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11612) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11613) tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11614) tg3_free_rings(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11615) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11616)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11617) tg3_full_unlock(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11618)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11619) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11620) goto out_free_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11621)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11622) if (test_irq && tg3_flag(tp, USING_MSI)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11623) err = tg3_test_msi(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11624)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11625) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11626) tg3_full_lock(tp, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11627) tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11628) tg3_free_rings(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11629) tg3_full_unlock(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11630)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11631) goto out_napi_fini;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11632) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11633)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11634) if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11635) u32 val = tr32(PCIE_TRANSACTION_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11636)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11637) tw32(PCIE_TRANSACTION_CFG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11638) val | PCIE_TRANS_CFG_1SHOT_MSI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11639) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11640) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11641)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11642) tg3_phy_start(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11643)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11644) tg3_hwmon_open(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11645)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11646) tg3_full_lock(tp, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11647)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11648) tg3_timer_start(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11649) tg3_flag_set(tp, INIT_COMPLETE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11650) tg3_enable_ints(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11651)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11652) tg3_ptp_resume(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11653)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11654) tg3_full_unlock(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11655)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11656) netif_tx_start_all_queues(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11657)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11658) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11659) * If the loopback feature was turned on while the device was down,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11660) * make sure that it is re-enabled properly now.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11661) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11662) if (dev->features & NETIF_F_LOOPBACK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11663) tg3_set_loopback(dev, dev->features);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11664)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11665) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11666)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11667) out_free_irq:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11668) for (i = tp->irq_cnt - 1; i >= 0; i--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11669) struct tg3_napi *tnapi = &tp->napi[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11670) free_irq(tnapi->irq_vec, tnapi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11671) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11672)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11673) out_napi_fini:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11674) tg3_napi_disable(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11675) tg3_napi_fini(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11676) tg3_free_consistent(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11677)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11678) out_ints_fini:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11679) tg3_ints_fini(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11680)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11681) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11682) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11683)
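^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11683) /* Tear down everything tg3_start() set up, roughly in reverse order:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11683) * cancel the reset worker, stop the timer and PHY, halt the chip
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11683) * under the full lock, then free the IRQs, NAPI contexts and DMA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11683) * memory.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11683) */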
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11684) static void tg3_stop(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11685) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11686) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11687)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11688) tg3_reset_task_cancel(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11689) tg3_netif_stop(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11690)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11691) tg3_timer_stop(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11692)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11693) tg3_hwmon_close(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11694)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11695) tg3_phy_stop(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11696)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11697) tg3_full_lock(tp, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11698)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11699) tg3_disable_ints(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11700)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11701) tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11702) tg3_free_rings(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11703) tg3_flag_clear(tp, INIT_COMPLETE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11704)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11705) tg3_full_unlock(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11706)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11707) for (i = tp->irq_cnt - 1; i >= 0; i--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11708) struct tg3_napi *tnapi = &tp->napi[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11709) free_irq(tnapi->irq_vec, tnapi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11710) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11711)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11712) tg3_ints_fini(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11713)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11714) tg3_napi_fini(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11715)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11716) tg3_free_consistent(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11717) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11718)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11719) static int tg3_open(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11720) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11721) struct tg3 *tp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11722) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11723)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11724) if (tp->pcierr_recovery) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11725) netdev_err(dev, "Failed to open device. PCI error recovery "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11726) "in progress\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11727) return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11728) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11729)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11730) if (tp->fw_needed) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11731) err = tg3_request_firmware(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11732) if (tg3_asic_rev(tp) == ASIC_REV_57766) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11733) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11734) netdev_warn(tp->dev, "EEE capability disabled\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11735) tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11736) } else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11737) netdev_warn(tp->dev, "EEE capability restored\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11738) tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11739) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11740) } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11741) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11742) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11743) } else if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11744) netdev_warn(tp->dev, "TSO capability disabled\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11745) tg3_flag_clear(tp, TSO_CAPABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11746) } else if (!tg3_flag(tp, TSO_CAPABLE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11747) netdev_notice(tp->dev, "TSO capability restored\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11748) tg3_flag_set(tp, TSO_CAPABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11749) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11750) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11751)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11752) tg3_carrier_off(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11753)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11754) err = tg3_power_up(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11755) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11756) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11757)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11758) tg3_full_lock(tp, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11759)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11760) tg3_disable_ints(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11761) tg3_flag_clear(tp, INIT_COMPLETE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11762)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11763) tg3_full_unlock(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11764)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11765) err = tg3_start(tp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11766) !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11767) true, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11768) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11769) tg3_frob_aux_power(tp, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11770) pci_set_power_state(tp->pdev, PCI_D3hot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11771) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11772)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11773) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11774) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11775)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11776) static int tg3_close(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11777) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11778) struct tg3 *tp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11779)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11780) if (tp->pcierr_recovery) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11781) netdev_err(dev, "Failed to close device. PCI error recovery "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11782) "in progress\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11783) return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11784) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11786) tg3_stop(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11788) if (pci_device_is_present(tp->pdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11789) tg3_power_down_prepare(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11790)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11791) tg3_carrier_off(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11792) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11793) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11794) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11795)
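^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11795) /* Combine the two 32-bit halves of a hardware statistics counter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11795) * into a single 64-bit value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11795) */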
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11796) static inline u64 get_stat64(tg3_stat64_t *val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11797) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11798) return ((u64)val->high << 32) | ((u64)val->low);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11799) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11800)
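^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11800) /* On 5700/5701 with a copper PHY, RX CRC errors are counted by the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11800) * PHY: enable the CRC counter via MII_TG3_TEST1 and fold the reading
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11800) * into tp->phy_crc_errors. Other chips report FCS errors through the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11800) * regular hardware statistics block.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11800) */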
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11801) static u64 tg3_calc_crc_errors(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11802) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11803) struct tg3_hw_stats *hw_stats = tp->hw_stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11804)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11805) if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11806) (tg3_asic_rev(tp) == ASIC_REV_5700 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11807) tg3_asic_rev(tp) == ASIC_REV_5701)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11808) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11810) if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11811) tg3_writephy(tp, MII_TG3_TEST1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11812) val | MII_TG3_TEST1_CRC_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11813) tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11814) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11815) val = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11815) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11816)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11817) tp->phy_crc_errors += val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11819) return tp->phy_crc_errors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11820) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11821)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11822) return get_stat64(&hw_stats->rx_fcs_errors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11823) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11824)
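^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11824) /* Accumulate one ethtool statistic: the counter snapshot saved in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11824) * tp->estats_prev plus the current hardware counter value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11824) */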
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11825) #define ESTAT_ADD(member) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11826) estats->member = old_estats->member + \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11827) get_stat64(&hw_stats->member)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11829) static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11830) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11831) struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11832) struct tg3_hw_stats *hw_stats = tp->hw_stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11834) ESTAT_ADD(rx_octets);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11835) ESTAT_ADD(rx_fragments);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11836) ESTAT_ADD(rx_ucast_packets);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11837) ESTAT_ADD(rx_mcast_packets);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11838) ESTAT_ADD(rx_bcast_packets);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11839) ESTAT_ADD(rx_fcs_errors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11840) ESTAT_ADD(rx_align_errors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11841) ESTAT_ADD(rx_xon_pause_rcvd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11842) ESTAT_ADD(rx_xoff_pause_rcvd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11843) ESTAT_ADD(rx_mac_ctrl_rcvd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11844) ESTAT_ADD(rx_xoff_entered);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11845) ESTAT_ADD(rx_frame_too_long_errors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11846) ESTAT_ADD(rx_jabbers);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11847) ESTAT_ADD(rx_undersize_packets);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11848) ESTAT_ADD(rx_in_length_errors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11849) ESTAT_ADD(rx_out_length_errors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11850) ESTAT_ADD(rx_64_or_less_octet_packets);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11851) ESTAT_ADD(rx_65_to_127_octet_packets);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11852) ESTAT_ADD(rx_128_to_255_octet_packets);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11853) ESTAT_ADD(rx_256_to_511_octet_packets);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11854) ESTAT_ADD(rx_512_to_1023_octet_packets);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11855) ESTAT_ADD(rx_1024_to_1522_octet_packets);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11856) ESTAT_ADD(rx_1523_to_2047_octet_packets);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11857) ESTAT_ADD(rx_2048_to_4095_octet_packets);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11858) ESTAT_ADD(rx_4096_to_8191_octet_packets);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11859) ESTAT_ADD(rx_8192_to_9022_octet_packets);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11861) ESTAT_ADD(tx_octets);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11862) ESTAT_ADD(tx_collisions);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11863) ESTAT_ADD(tx_xon_sent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11864) ESTAT_ADD(tx_xoff_sent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11865) ESTAT_ADD(tx_flow_control);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11866) ESTAT_ADD(tx_mac_errors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11867) ESTAT_ADD(tx_single_collisions);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11868) ESTAT_ADD(tx_mult_collisions);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11869) ESTAT_ADD(tx_deferred);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11870) ESTAT_ADD(tx_excessive_collisions);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11871) ESTAT_ADD(tx_late_collisions);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11872) ESTAT_ADD(tx_collide_2times);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11873) ESTAT_ADD(tx_collide_3times);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11874) ESTAT_ADD(tx_collide_4times);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11875) ESTAT_ADD(tx_collide_5times);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11876) ESTAT_ADD(tx_collide_6times);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11877) ESTAT_ADD(tx_collide_7times);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11878) ESTAT_ADD(tx_collide_8times);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11879) ESTAT_ADD(tx_collide_9times);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11880) ESTAT_ADD(tx_collide_10times);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11881) ESTAT_ADD(tx_collide_11times);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11882) ESTAT_ADD(tx_collide_12times);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11883) ESTAT_ADD(tx_collide_13times);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11884) ESTAT_ADD(tx_collide_14times);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11885) ESTAT_ADD(tx_collide_15times);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11886) ESTAT_ADD(tx_ucast_packets);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11887) ESTAT_ADD(tx_mcast_packets);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11888) ESTAT_ADD(tx_bcast_packets);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11889) ESTAT_ADD(tx_carrier_sense_errors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11890) ESTAT_ADD(tx_discards);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11891) ESTAT_ADD(tx_errors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11893) ESTAT_ADD(dma_writeq_full);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11894) ESTAT_ADD(dma_write_prioq_full);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11895) ESTAT_ADD(rxbds_empty);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11896) ESTAT_ADD(rx_discards);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11897) ESTAT_ADD(rx_errors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11898) ESTAT_ADD(rx_threshold_hit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11899)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11900) ESTAT_ADD(dma_readq_full);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11901) ESTAT_ADD(dma_read_prioq_full);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11902) ESTAT_ADD(tx_comp_queue_full);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11903)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11904) ESTAT_ADD(ring_set_send_prod_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11905) ESTAT_ADD(ring_status_update);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11906) ESTAT_ADD(nic_irqs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11907) ESTAT_ADD(nic_avoided_irqs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11908) ESTAT_ADD(nic_tx_threshold_hit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11909)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11910) ESTAT_ADD(mbuf_lwm_thresh_hit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11911) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11913) static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11914) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11915) struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11916) struct tg3_hw_stats *hw_stats = tp->hw_stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11918) stats->rx_packets = old_stats->rx_packets +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11919) get_stat64(&hw_stats->rx_ucast_packets) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11920) get_stat64(&hw_stats->rx_mcast_packets) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11921) get_stat64(&hw_stats->rx_bcast_packets);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11923) stats->tx_packets = old_stats->tx_packets +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11924) get_stat64(&hw_stats->tx_ucast_packets) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11925) get_stat64(&hw_stats->tx_mcast_packets) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11926) get_stat64(&hw_stats->tx_bcast_packets);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11928) stats->rx_bytes = old_stats->rx_bytes +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11929) get_stat64(&hw_stats->rx_octets);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11930) stats->tx_bytes = old_stats->tx_bytes +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11931) get_stat64(&hw_stats->tx_octets);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11933) stats->rx_errors = old_stats->rx_errors +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11934) get_stat64(&hw_stats->rx_errors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11935) stats->tx_errors = old_stats->tx_errors +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11936) get_stat64(&hw_stats->tx_errors) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11937) get_stat64(&hw_stats->tx_mac_errors) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11938) get_stat64(&hw_stats->tx_carrier_sense_errors) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11939) get_stat64(&hw_stats->tx_discards);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11941) stats->multicast = old_stats->multicast +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11942) get_stat64(&hw_stats->rx_mcast_packets);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11943) stats->collisions = old_stats->collisions +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11944) get_stat64(&hw_stats->tx_collisions);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11945)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11946) stats->rx_length_errors = old_stats->rx_length_errors +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11947) get_stat64(&hw_stats->rx_frame_too_long_errors) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11948) get_stat64(&hw_stats->rx_undersize_packets);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11950) stats->rx_frame_errors = old_stats->rx_frame_errors +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11951) get_stat64(&hw_stats->rx_align_errors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11952) stats->tx_aborted_errors = old_stats->tx_aborted_errors +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11953) get_stat64(&hw_stats->tx_discards);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11954) stats->tx_carrier_errors = old_stats->tx_carrier_errors +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11955) get_stat64(&hw_stats->tx_carrier_sense_errors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11956)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11957) stats->rx_crc_errors = old_stats->rx_crc_errors +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11958) tg3_calc_crc_errors(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11959)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11960) stats->rx_missed_errors = old_stats->rx_missed_errors +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11961) get_stat64(&hw_stats->rx_discards);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11963) stats->rx_dropped = tp->rx_dropped;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11964) stats->tx_dropped = tp->tx_dropped;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11965) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11967) static int tg3_get_regs_len(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11968) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11969) return TG3_REG_BLK_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11970) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11972) static void tg3_get_regs(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11973) struct ethtool_regs *regs, void *_p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11974) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11975) struct tg3 *tp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11977) regs->version = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11978)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11979) memset(_p, 0, TG3_REG_BLK_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11980)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11981) if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11982) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11983)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11984) tg3_full_lock(tp, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11985)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11986) tg3_dump_legacy_regs(tp, (u32 *)_p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11987)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11988) tg3_full_unlock(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11989) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11990)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11991) static int tg3_get_eeprom_len(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11992) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11993) struct tg3 *tp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11994)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11995) return tp->nvram_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11996) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11997)
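^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11997) /* ethtool EEPROM read, backed by NVRAM. Data is fetched as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11997) * big-endian 4-byte words, so an unaligned request is split into an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11997) * unaligned head, a run of whole words, and an unaligned tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11997) * eeprom->len reflects the bytes actually copied, even on early exit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11997) */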
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11998) static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11999) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12000) struct tg3 *tp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12001) int ret, cpmu_restore = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12002) u8 *pd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12003) u32 i, offset, len, b_offset, b_count, cpmu_val = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12004) __be32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12005)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12006) if (tg3_flag(tp, NO_NVRAM))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12007) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12009) offset = eeprom->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12010) len = eeprom->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12011) eeprom->len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12012)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12013) eeprom->magic = TG3_EEPROM_MAGIC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12015) /* Override clock, link aware and link idle modes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12016) if (tg3_flag(tp, CPMU_PRESENT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12017) cpmu_val = tr32(TG3_CPMU_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12018) if (cpmu_val & (CPMU_CTRL_LINK_AWARE_MODE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12019) CPMU_CTRL_LINK_IDLE_MODE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12020) tw32(TG3_CPMU_CTRL, cpmu_val &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12021) ~(CPMU_CTRL_LINK_AWARE_MODE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12022) CPMU_CTRL_LINK_IDLE_MODE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12023) cpmu_restore = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12024) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12025) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12026) tg3_override_clk(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12028) if (offset & 3) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12029) /* adjustments to start on required 4 byte boundary */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12030) b_offset = offset & 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12031) b_count = 4 - b_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12032) if (b_count > len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12033) /* i.e. offset=1 len=2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12034) b_count = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12035) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12036) ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12037) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12038) goto eeprom_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12039) memcpy(data, ((char *)&val) + b_offset, b_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12040) len -= b_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12041) offset += b_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12042) eeprom->len += b_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12043) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12045) /* read bytes up to the last 4 byte boundary */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12046) pd = &data[eeprom->len];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12047) for (i = 0; i < (len - (len & 3)); i += 4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12048) ret = tg3_nvram_read_be32(tp, offset + i, &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12049) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12050) if (i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12051) i -= 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12052) eeprom->len += i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12053) goto eeprom_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12054) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12055) memcpy(pd + i, &val, 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12056) if (need_resched()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12057) if (signal_pending(current)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12058) eeprom->len += i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12059) ret = -EINTR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12060) goto eeprom_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12061) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12062) cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12063) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12064) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12065) eeprom->len += i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12066)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12067) if (len & 3) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12068) /* read last bytes not ending on 4 byte boundary */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12069) pd = &data[eeprom->len];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12070) b_count = len & 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12071) b_offset = offset + len - b_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12072) ret = tg3_nvram_read_be32(tp, b_offset, &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12073) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12074) goto eeprom_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12075) memcpy(pd, &val, b_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12076) eeprom->len += b_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12077) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12078) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12079)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12080) eeprom_done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12081) /* Restore clock, link aware and link idle modes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12082) tg3_restore_clk(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12083) if (cpmu_restore)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12084) tw32(TG3_CPMU_CTRL, cpmu_val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12086) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12087) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12088)
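/* EEPROM writes ("ethtool -E eth0 magic 0x669955aa offset 2 length 3";
 * the magic must equal TG3_EEPROM_MAGIC). Unaligned requests become a
 * read-modify-write: the partial head and tail words are read back and
 * merged with the user data in a scratch buffer, so the NVRAM block
 * write below always covers whole 4-byte words.
 */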
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12089) static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12090) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12091) struct tg3 *tp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12092) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12093) u32 offset, len, b_offset, odd_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12094) u8 *buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12095) __be32 start = 0, end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12097) if (tg3_flag(tp, NO_NVRAM) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12098) eeprom->magic != TG3_EEPROM_MAGIC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12099) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12101) offset = eeprom->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12102) len = eeprom->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12103)
	b_offset = offset & 3;
	if (b_offset) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12105) /* adjustments to start on required 4 byte boundary */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12106) ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12107) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12108) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12109) len += b_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12110) offset &= ~3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12111) if (len < 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12112) len = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12113) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12114)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12115) odd_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12116) if (len & 3) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12117) /* adjustments to end on required 4 byte boundary */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12118) odd_len = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12119) len = (len + 3) & ~3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12120) ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12121) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12122) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12123) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12125) buf = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12126) if (b_offset || odd_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12127) buf = kmalloc(len, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12128) if (!buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12129) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12130) if (b_offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12131) memcpy(buf, &start, 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12132) if (odd_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12133) memcpy(buf+len-4, &end, 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12134) memcpy(buf + b_offset, data, eeprom->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12135) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12137) ret = tg3_nvram_write_block(tp, offset, len, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12139) if (buf != data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12140) kfree(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12142) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12143) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12144)
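/* Link settings as reported by a plain "ethtool eth0". The pause bits
 * advertised below use the standard symmetric/asymmetric encoding:
 *
 *	flowctrl RX+TX  ->  Pause
 *	flowctrl RX     ->  Pause | Asym_Pause
 *	flowctrl TX     ->  Asym_Pause
 */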
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12145) static int tg3_get_link_ksettings(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12146) struct ethtool_link_ksettings *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12147) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12148) struct tg3 *tp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12149) u32 supported, advertising;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12150)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12151) if (tg3_flag(tp, USE_PHYLIB)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12152) struct phy_device *phydev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12153) if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12154) return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12155) phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12156) phy_ethtool_ksettings_get(phydev, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12158) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12159) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12160)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12161) supported = (SUPPORTED_Autoneg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12163) if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12164) supported |= (SUPPORTED_1000baseT_Half |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12165) SUPPORTED_1000baseT_Full);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12167) if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12168) supported |= (SUPPORTED_100baseT_Half |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12169) SUPPORTED_100baseT_Full |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12170) SUPPORTED_10baseT_Half |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12171) SUPPORTED_10baseT_Full |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12172) SUPPORTED_TP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12173) cmd->base.port = PORT_TP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12174) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12175) supported |= SUPPORTED_FIBRE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12176) cmd->base.port = PORT_FIBRE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12177) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12178) ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12179) supported);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12180)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12181) advertising = tp->link_config.advertising;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12182) if (tg3_flag(tp, PAUSE_AUTONEG)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12183) if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12184) if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12185) advertising |= ADVERTISED_Pause;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12186) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12187) advertising |= ADVERTISED_Pause |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12188) ADVERTISED_Asym_Pause;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12189) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12190) } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12191) advertising |= ADVERTISED_Asym_Pause;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12192) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12193) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12194) ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12195) advertising);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12197) if (netif_running(dev) && tp->link_up) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12198) cmd->base.speed = tp->link_config.active_speed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12199) cmd->base.duplex = tp->link_config.active_duplex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12200) ethtool_convert_legacy_u32_to_link_mode(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12201) cmd->link_modes.lp_advertising,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12202) tp->link_config.rmt_adv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12204) if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12205) if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12206) cmd->base.eth_tp_mdix = ETH_TP_MDI_X;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12207) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12208) cmd->base.eth_tp_mdix = ETH_TP_MDI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12209) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12210) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12211) cmd->base.speed = SPEED_UNKNOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12212) cmd->base.duplex = DUPLEX_UNKNOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12213) cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12214) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12215) cmd->base.phy_address = tp->phy_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12216) cmd->base.autoneg = tp->link_config.autoneg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12217) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12218) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12219)
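/* Settings changes, e.g. "ethtool -s eth0 autoneg on" or
 * "ethtool -s eth0 speed 100 duplex full autoneg off". With autoneg
 * enabled the advertised mask is validated against the PHY's real
 * abilities; with autoneg disabled, SerDes devices accept only
 * 1000/full and copper devices only forced 10 or 100 Mb/s.
 */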
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12220) static int tg3_set_link_ksettings(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12221) const struct ethtool_link_ksettings *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12222) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12223) struct tg3 *tp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12224) u32 speed = cmd->base.speed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12225) u32 advertising;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12226)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12227) if (tg3_flag(tp, USE_PHYLIB)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12228) struct phy_device *phydev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12229) if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12230) return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12231) phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12232) return phy_ethtool_ksettings_set(phydev, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12233) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12235) if (cmd->base.autoneg != AUTONEG_ENABLE &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12236) cmd->base.autoneg != AUTONEG_DISABLE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12237) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12239) if (cmd->base.autoneg == AUTONEG_DISABLE &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12240) cmd->base.duplex != DUPLEX_FULL &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12241) cmd->base.duplex != DUPLEX_HALF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12242) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12243)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12244) ethtool_convert_link_mode_to_legacy_u32(&advertising,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12245) cmd->link_modes.advertising);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12247) if (cmd->base.autoneg == AUTONEG_ENABLE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12248) u32 mask = ADVERTISED_Autoneg |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12249) ADVERTISED_Pause |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12250) ADVERTISED_Asym_Pause;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12251)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12252) if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12253) mask |= ADVERTISED_1000baseT_Half |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12254) ADVERTISED_1000baseT_Full;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12256) if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12257) mask |= ADVERTISED_100baseT_Half |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12258) ADVERTISED_100baseT_Full |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12259) ADVERTISED_10baseT_Half |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12260) ADVERTISED_10baseT_Full |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12261) ADVERTISED_TP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12262) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12263) mask |= ADVERTISED_FIBRE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12264)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12265) if (advertising & ~mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12266) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12267)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12268) mask &= (ADVERTISED_1000baseT_Half |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12269) ADVERTISED_1000baseT_Full |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12270) ADVERTISED_100baseT_Half |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12271) ADVERTISED_100baseT_Full |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12272) ADVERTISED_10baseT_Half |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12273) ADVERTISED_10baseT_Full);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12275) advertising &= mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12276) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12277) if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12278) if (speed != SPEED_1000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12279) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12281) if (cmd->base.duplex != DUPLEX_FULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12282) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12283) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12284) if (speed != SPEED_100 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12285) speed != SPEED_10)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12286) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12287) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12288) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12290) tg3_full_lock(tp, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12291)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12292) tp->link_config.autoneg = cmd->base.autoneg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12293) if (cmd->base.autoneg == AUTONEG_ENABLE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12294) tp->link_config.advertising = (advertising |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12295) ADVERTISED_Autoneg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12296) tp->link_config.speed = SPEED_UNKNOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12297) tp->link_config.duplex = DUPLEX_UNKNOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12298) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12299) tp->link_config.advertising = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12300) tp->link_config.speed = speed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12301) tp->link_config.duplex = cmd->base.duplex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12302) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12304) tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12305)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12306) tg3_warn_mgmt_link_flap(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12307)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12308) if (netif_running(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12309) tg3_setup_phy(tp, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12310)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12311) tg3_full_unlock(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12312)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12313) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12314) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12315)
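/* Backs "ethtool -i eth0": driver name, the firmware version string
 * assembled at probe time, and the PCI bus address.
 */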
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12316) static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12317) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12318) struct tg3 *tp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12319)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12320) strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12321) strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12322) strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12323) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12324)
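/* Wake-on-LAN: only magic-packet wake is supported, which ethtool
 * spells "g", e.g. "ethtool -s eth0 wol g" to enable and
 * "ethtool -s eth0 wol d" to disable.
 */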
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12325) static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12326) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12327) struct tg3 *tp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12329) if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12330) wol->supported = WAKE_MAGIC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12331) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12332) wol->supported = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12333) wol->wolopts = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12334) if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12335) wol->wolopts = WAKE_MAGIC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12336) memset(&wol->sopass, 0, sizeof(wol->sopass));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12337) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12338)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12339) static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12340) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12341) struct tg3 *tp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12342) struct device *dp = &tp->pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12343)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12344) if (wol->wolopts & ~WAKE_MAGIC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12345) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12346) if ((wol->wolopts & WAKE_MAGIC) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12347) !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12348) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12349)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12350) device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12351)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12352) if (device_may_wakeup(dp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12353) tg3_flag_set(tp, WOL_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12354) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12355) tg3_flag_clear(tp, WOL_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12356)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12357) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12358) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12359)
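/* Message-level bitmap consumed by the netif_msg_*() logging macros;
 * read via "ethtool eth0", set via "ethtool -s eth0 msglvl <mask>".
 */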
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12360) static u32 tg3_get_msglevel(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12361) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12362) struct tg3 *tp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12363) return tp->msg_enable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12364) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12365)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12366) static void tg3_set_msglevel(struct net_device *dev, u32 value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12367) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12368) struct tg3 *tp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12369) tp->msg_enable = value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12370) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12371)
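/* "ethtool -r eth0" lands here. Restarting autonegotiation only makes
 * sense when it is enabled (or parallel detection is active), hence
 * the -EINVAL default below.
 */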
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12372) static int tg3_nway_reset(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12373) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12374) struct tg3 *tp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12375) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12377) if (!netif_running(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12378) return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12379)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12380) if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12381) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12382)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12383) tg3_warn_mgmt_link_flap(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12384)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12385) if (tg3_flag(tp, USE_PHYLIB)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12386) if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12387) return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12388) r = phy_start_aneg(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12389) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12390) u32 bmcr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12391)
		spin_lock_bh(&tp->lock);
		r = -EINVAL;
		/* BMCR is read twice on purpose: the first read flushes a
		 * potentially stale value and its result is ignored; only
		 * the checked second read is used.
		 */
		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12396) ((bmcr & BMCR_ANENABLE) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12397) (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12398) tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12399) BMCR_ANENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12400) r = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12401) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12402) spin_unlock_bh(&tp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12403) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12404)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12405) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12406) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12407)
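/* Ring sizing ("ethtool -g" / "ethtool -G"). Note the TX lower bound
 * enforced in the setter: the ring must hold more than MAX_SKB_FRAGS
 * descriptors (three times that on TSO_BUG chips) so a maximally
 * fragmented skb can always be queued.
 */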
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12408) static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12409) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12410) struct tg3 *tp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12411)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12412) ering->rx_max_pending = tp->rx_std_ring_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12413) if (tg3_flag(tp, JUMBO_RING_ENABLE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12414) ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12415) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12416) ering->rx_jumbo_max_pending = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12418) ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12419)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12420) ering->rx_pending = tp->rx_pending;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12421) if (tg3_flag(tp, JUMBO_RING_ENABLE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12422) ering->rx_jumbo_pending = tp->rx_jumbo_pending;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12423) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12424) ering->rx_jumbo_pending = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12425)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12426) ering->tx_pending = tp->napi[0].tx_pending;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12427) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12428)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12429) static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12430) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12431) struct tg3 *tp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12432) int i, irq_sync = 0, err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12433) bool reset_phy = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12434)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12435) if ((ering->rx_pending > tp->rx_std_ring_mask) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12436) (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12437) (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12438) (ering->tx_pending <= MAX_SKB_FRAGS) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12439) (tg3_flag(tp, TSO_BUG) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12440) (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12441) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12442)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12443) if (netif_running(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12444) tg3_phy_stop(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12445) tg3_netif_stop(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12446) irq_sync = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12447) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12448)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12449) tg3_full_lock(tp, irq_sync);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12450)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12451) tp->rx_pending = ering->rx_pending;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12452)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12453) if (tg3_flag(tp, MAX_RXPEND_64) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12454) tp->rx_pending > 63)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12455) tp->rx_pending = 63;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12456)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12457) if (tg3_flag(tp, JUMBO_RING_ENABLE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12458) tp->rx_jumbo_pending = ering->rx_jumbo_pending;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12459)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12460) for (i = 0; i < tp->irq_max; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12461) tp->napi[i].tx_pending = ering->tx_pending;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12462)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12463) if (netif_running(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12464) tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12465) /* Reset PHY to avoid PHY lock up */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12466) if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12467) tg3_asic_rev(tp) == ASIC_REV_5719 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12468) tg3_asic_rev(tp) == ASIC_REV_5720)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12469) reset_phy = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12470)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12471) err = tg3_restart_hw(tp, reset_phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12472) if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12473) tg3_netif_start(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12474) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12475)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12476) tg3_full_unlock(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12477)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12478) if (irq_sync && !err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12479) tg3_phy_start(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12480)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12481) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12482) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12483)
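/* Flow control ("ethtool -a" / "ethtool -A"). Applying a change on a
 * running interface restarts the hardware, and 5717/5719/5720 chips
 * additionally get a PHY reset to avoid a known PHY lockup.
 */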
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12484) static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12485) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12486) struct tg3 *tp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12487)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12488) epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12489)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12490) if (tp->link_config.flowctrl & FLOW_CTRL_RX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12491) epause->rx_pause = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12492) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12493) epause->rx_pause = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12494)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12495) if (tp->link_config.flowctrl & FLOW_CTRL_TX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12496) epause->tx_pause = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12497) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12498) epause->tx_pause = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12499) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12501) static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12502) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12503) struct tg3 *tp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12504) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12505) bool reset_phy = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12506)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12507) if (tp->link_config.autoneg == AUTONEG_ENABLE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12508) tg3_warn_mgmt_link_flap(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12509)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12510) if (tg3_flag(tp, USE_PHYLIB)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12511) struct phy_device *phydev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12512)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12513) phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12514)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12515) if (!phy_validate_pause(phydev, epause))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12516) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12517)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12518) tp->link_config.flowctrl = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12519) phy_set_asym_pause(phydev, epause->rx_pause, epause->tx_pause);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12520) if (epause->rx_pause) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12521) tp->link_config.flowctrl |= FLOW_CTRL_RX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12522)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12523) if (epause->tx_pause) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12524) tp->link_config.flowctrl |= FLOW_CTRL_TX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12525) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12526) } else if (epause->tx_pause) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12527) tp->link_config.flowctrl |= FLOW_CTRL_TX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12528) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12529)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12530) if (epause->autoneg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12531) tg3_flag_set(tp, PAUSE_AUTONEG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12532) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12533) tg3_flag_clear(tp, PAUSE_AUTONEG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12535) if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12536) if (phydev->autoneg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12537) /* phy_set_asym_pause() will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12538) * renegotiate the link to inform our
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12539) * link partner of our flow control
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12540) * settings, even if the flow control
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12541) * is forced. Let tg3_adjust_link()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12542) * do the final flow control setup.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12543) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12544) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12545) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12546)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12547) if (!epause->autoneg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12548) tg3_setup_flow_control(tp, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12549) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12550) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12551) int irq_sync = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12552)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12553) if (netif_running(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12554) tg3_netif_stop(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12555) irq_sync = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12556) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12557)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12558) tg3_full_lock(tp, irq_sync);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12559)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12560) if (epause->autoneg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12561) tg3_flag_set(tp, PAUSE_AUTONEG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12562) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12563) tg3_flag_clear(tp, PAUSE_AUTONEG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12564) if (epause->rx_pause)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12565) tp->link_config.flowctrl |= FLOW_CTRL_RX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12566) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12567) tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12568) if (epause->tx_pause)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12569) tp->link_config.flowctrl |= FLOW_CTRL_TX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12570) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12571) tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12572)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12573) if (netif_running(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12574) tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12575) /* Reset PHY to avoid PHY lock up */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12576) if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12577) tg3_asic_rev(tp) == ASIC_REV_5719 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12578) tg3_asic_rev(tp) == ASIC_REV_5720)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12579) reset_phy = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12580)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12581) err = tg3_restart_hw(tp, reset_phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12582) if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12583) tg3_netif_start(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12584) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12585)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12586) tg3_full_unlock(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12587) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12588)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12589) tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12590)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12591) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12592) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12593)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12594) static int tg3_get_sset_count(struct net_device *dev, int sset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12595) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12596) switch (sset) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12597) case ETH_SS_TEST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12598) return TG3_NUM_TEST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12599) case ETH_SS_STATS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12600) return TG3_NUM_STATS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12601) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12602) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12603) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12604) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12605)
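/* ETHTOOL_GRXRINGS reports the RX queue count; userspace ethtool
 * issues it, for example, before printing the RSS indirection table
 * ("ethtool -x eth0") to learn how many rings the entries map to.
 */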
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12606) static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12607) u32 *rules __always_unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12608) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12609) struct tg3 *tp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12610)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12611) if (!tg3_flag(tp, SUPPORT_MSIX))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12612) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12613)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12614) switch (info->cmd) {
	case ETHTOOL_GRXRINGS:
		if (netif_running(tp->dev)) {
			info->data = tp->rxq_cnt;
		} else {
			info->data = num_online_cpus();
			if (info->data > TG3_RSS_MAX_NUM_QS)
				info->data = TG3_RSS_MAX_NUM_QS;
		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12623)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12624) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12625)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12626) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12627) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12628) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12629) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12630)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12631) static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12632) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12633) u32 size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12634) struct tg3 *tp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12635)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12636) if (tg3_flag(tp, SUPPORT_MSIX))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12637) size = TG3_RSS_INDIR_TBL_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12638)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12639) return size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12640) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12641)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12642) static int tg3_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12643) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12644) struct tg3 *tp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12645) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12646)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12647) if (hfunc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12648) *hfunc = ETH_RSS_HASH_TOP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12649) if (!indir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12650) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12651)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12652) for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12653) indir[i] = tp->rss_ind_tbl[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12654)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12655) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12656) }
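/* "ethtool -X eth0 equal 4" (or an explicit "weight" list) rewrites
 * the RSS indirection table. Only the table is settable: there is no
 * user-programmable hash key, and the hash function is fixed to
 * Toeplitz (ETH_RSS_HASH_TOP).
 */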
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12657)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12658) static int tg3_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12659) const u8 hfunc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12660) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12661) struct tg3 *tp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12662) size_t i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12663)
	/* Only the indirection table is changeable here: reject any
	 * request that supplies a hash key or selects a hash function
	 * other than Toeplitz.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12667) if (key ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12668) (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12669) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12670)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12671) if (!indir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12672) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12673)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12674) for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12675) tp->rss_ind_tbl[i] = indir[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12676)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12677) if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12678) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12679)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12680) /* It is legal to write the indirection
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12681) * table while the device is running.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12682) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12683) tg3_full_lock(tp, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12684) tg3_rss_write_indir_tbl(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12685) tg3_full_unlock(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12686)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12687) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12688) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12689)
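/* Queue counts ("ethtool -l" / "ethtool -L"). A channel change on a
 * running device is serviced with a full tg3_stop()/tg3_start()
 * cycle, so the link will bounce.
 */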
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12690) static void tg3_get_channels(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12691) struct ethtool_channels *channel)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12692) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12693) struct tg3 *tp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12694) u32 deflt_qs = netif_get_num_default_rss_queues();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12695)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12696) channel->max_rx = tp->rxq_max;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12697) channel->max_tx = tp->txq_max;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12698)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12699) if (netif_running(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12700) channel->rx_count = tp->rxq_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12701) channel->tx_count = tp->txq_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12702) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12703) if (tp->rxq_req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12704) channel->rx_count = tp->rxq_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12705) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12706) channel->rx_count = min(deflt_qs, tp->rxq_max);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12707)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12708) if (tp->txq_req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12709) channel->tx_count = tp->txq_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12710) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12711) channel->tx_count = min(deflt_qs, tp->txq_max);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12712) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12713) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12714)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12715) static int tg3_set_channels(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12716) struct ethtool_channels *channel)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12717) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12718) struct tg3 *tp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12719)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12720) if (!tg3_flag(tp, SUPPORT_MSIX))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12721) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12722)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12723) if (channel->rx_count > tp->rxq_max ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12724) channel->tx_count > tp->txq_max)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12725) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12726)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12727) tp->rxq_req = channel->rx_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12728) tp->txq_req = channel->tx_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12729)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12730) if (!netif_running(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12731) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12732)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12733) tg3_stop(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12734)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12735) tg3_carrier_off(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12736)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12737) tg3_start(tp, true, false, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12738)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12739) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12740) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12741)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12742) static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12743) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12744) switch (stringset) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12745) case ETH_SS_STATS:
		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12747) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12748) case ETH_SS_TEST:
		memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12750) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12751) default:
		WARN_ON(1);	/* unknown stringset; should not be reachable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12753) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12754) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12755) }
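/* Port identification: "ethtool -p eth0 5" blinks the LEDs for five
 * seconds. Returning 1 for ETHTOOL_ID_ACTIVE asks the ethtool core to
 * call back with ETHTOOL_ID_ON/ETHTOOL_ID_OFF once per second.
 */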
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12756)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12757) static int tg3_set_phys_id(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12758) enum ethtool_phys_id_state state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12759) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12760) struct tg3 *tp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12761)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12762) switch (state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12763) case ETHTOOL_ID_ACTIVE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12764) return 1; /* cycle on/off once per second */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12765)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12766) case ETHTOOL_ID_ON:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12767) tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12768) LED_CTRL_1000MBPS_ON |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12769) LED_CTRL_100MBPS_ON |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12770) LED_CTRL_10MBPS_ON |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12771) LED_CTRL_TRAFFIC_OVERRIDE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12772) LED_CTRL_TRAFFIC_BLINK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12773) LED_CTRL_TRAFFIC_LED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12774) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12775)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12776) case ETHTOOL_ID_OFF:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12777) tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12778) LED_CTRL_TRAFFIC_OVERRIDE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12779) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12780)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12781) case ETHTOOL_ID_INACTIVE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12782) tw32(MAC_LED_CTRL, tp->led_ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12783) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12784) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12786) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12787) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12788)
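/* "ethtool -S eth0": statistics come from the device's DMA'd hardware
 * stats block when present; otherwise a zeroed buffer is returned so
 * the output still matches TG3_NUM_STATS entries.
 */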
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12789) static void tg3_get_ethtool_stats(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12790) struct ethtool_stats *estats, u64 *tmp_stats)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12791) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12792) struct tg3 *tp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12794) if (tp->hw_stats)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12795) tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12796) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12797) memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12798) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12799)
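/* VPD retrieval order: for TG3_EEPROM_MAGIC images, look for an
 * extended-VPD entry in the NVRAM directory, falling back to the
 * fixed VPD window; for anything else go through the PCI VPD
 * capability, retrying pci_read_vpd() up to three times when it
 * returns -ETIMEDOUT or -EINTR.
 */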
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12800) static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12801) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12802) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12803) __be32 *buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12804) u32 offset = 0, len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12805) u32 magic, val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12806)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12807) if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12808) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12810) if (magic == TG3_EEPROM_MAGIC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12811) for (offset = TG3_NVM_DIR_START;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12812) offset < TG3_NVM_DIR_END;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12813) offset += TG3_NVM_DIRENT_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12814) if (tg3_nvram_read(tp, offset, &val))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12815) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12816)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12817) if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12818) TG3_NVM_DIRTYPE_EXTVPD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12819) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12820) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12821)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12822) if (offset != TG3_NVM_DIR_END) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12823) len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12824) if (tg3_nvram_read(tp, offset + 4, &offset))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12825) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12827) offset = tg3_nvram_logical_addr(tp, offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12828) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12829) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12830)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12831) if (!offset || !len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12832) offset = TG3_NVM_VPD_OFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12833) len = TG3_NVM_VPD_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12834) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12835)
	buf = kmalloc(len, GFP_KERNEL);
	if (buf == NULL)
		return NULL;

	if (magic == TG3_EEPROM_MAGIC) {
		for (i = 0; i < len; i += 4) {
			/* The data is in little-endian format in NVRAM.
			 * Use the big-endian read routines to preserve
			 * the byte order as it exists in NVRAM.
			 */
			if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
				goto error;
		}
	} else {
		u8 *ptr;
		ssize_t cnt;
		unsigned int pos = 0;

		ptr = (u8 *)&buf[0];
		for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
			cnt = pci_read_vpd(tp->pdev, pos,
					   len - pos, ptr);
			if (cnt == -ETIMEDOUT || cnt == -EINTR)
				cnt = 0;
			else if (cnt < 0)
				goto error;
		}
		if (pos != len)
			goto error;
	}

	*vpdlen = len;

	return buf;

error:
	kfree(buf);
	return NULL;
}
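
/* Callers own the returned buffer and must kfree() it; *vpdlen is set
 * to the block length in bytes. A minimal usage sketch (this is how
 * tg3_test_nvram() below consumes it):
 *
 *	u32 vpdlen;
 *	__be32 *vpd = tg3_vpd_readblock(tp, &vpdlen);
 *
 *	if (vpd) {
 *		... parse the VPD resource tags in vpd[] ...
 *		kfree(vpd);
 *	}
 */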

#define NVRAM_TEST_SIZE			0x100
#define NVRAM_SELFBOOT_FORMAT1_0_SIZE	0x14
#define NVRAM_SELFBOOT_FORMAT1_2_SIZE	0x18
#define NVRAM_SELFBOOT_FORMAT1_3_SIZE	0x1c
#define NVRAM_SELFBOOT_FORMAT1_4_SIZE	0x20
#define NVRAM_SELFBOOT_FORMAT1_5_SIZE	0x24
#define NVRAM_SELFBOOT_FORMAT1_6_SIZE	0x50
#define NVRAM_SELFBOOT_HW_SIZE		0x20
#define NVRAM_SELFBOOT_DATA_SIZE	0x1c

static int tg3_test_nvram(struct tg3 *tp)
{
	u32 csum, magic, len;
	__be32 *buf;
	int i, j, k, err = 0, size;

	if (tg3_flag(tp, NO_NVRAM))
		return 0;

	if (tg3_nvram_read(tp, 0, &magic) != 0)
		return -EIO;

	if (magic == TG3_EEPROM_MAGIC)
		size = NVRAM_TEST_SIZE;
	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
		if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
		    TG3_EEPROM_SB_FORMAT_1) {
			switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
			case TG3_EEPROM_SB_REVISION_0:
				size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_2:
				size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_3:
				size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_4:
				size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_5:
				size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_6:
				size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
				break;
			default:
				return -EIO;
			}
		} else
			return 0;
	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		size = NVRAM_SELFBOOT_HW_SIZE;
	else
		return -EIO;

	buf = kmalloc(size, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	err = -EIO;
	for (i = 0, j = 0; i < size; i += 4, j++) {
		err = tg3_nvram_read_be32(tp, i, &buf[j]);
		if (err)
			break;
	}
	if (i < size)
		goto out;

	/* Selfboot format */
	magic = be32_to_cpu(buf[0]);
	if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
	    TG3_EEPROM_MAGIC_FW) {
		u8 *buf8 = (u8 *) buf, csum8 = 0;

		if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
		    TG3_EEPROM_SB_REVISION_2) {
			/* For rev 2, the checksum does not cover the
			 * 4-byte MBA (Multi-Boot Agent) word.
			 */
			for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
				csum8 += buf8[i];
			for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
				csum8 += buf8[i];
		} else {
			for (i = 0; i < size; i++)
				csum8 += buf8[i];
		}

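		/* A valid selfboot image is stored so that the covered
		 * bytes sum to zero mod 256; any corruption leaves a
		 * nonzero residue in csum8.
		 */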
		if (csum8 == 0) {
			err = 0;
			goto out;
		}

		err = -EIO;
		goto out;
	}

	if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
	    TG3_EEPROM_MAGIC_HW) {
		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
		u8 *buf8 = (u8 *) buf;

		/* Separate the parity bits and the data bytes. */
		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
			if ((i == 0) || (i == 8)) {
				int l;
				u8 msk;

				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			} else if (i == 16) {
				int l;
				u8 msk;

				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;

				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			}
			data[j++] = buf8[i];
		}

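		/* Each data byte must have odd parity once its stored
		 * parity bit is folded in: an even-weight byte needs its
		 * parity bit set, an odd-weight byte needs it clear.
		 * E.g. data 0x03 (hweight8() == 2, even) only passes if
		 * the corresponding parity[] entry is nonzero.
		 */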
		err = -EIO;
		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
			u8 hw8 = hweight8(data[i]);

			if ((hw8 & 0x1) && parity[i])
				goto out;
			else if (!(hw8 & 0x1) && !parity[i])
				goto out;
		}
		err = 0;
		goto out;
	}

	err = -EIO;

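	/* The CRC words checked below are stored little-endian in NVRAM,
	 * while buf[] preserves the raw NVRAM byte order (it was filled
	 * with the byte-order-preserving reads above), hence the
	 * le32_to_cpu() on nominally __be32 data.
	 */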
	/* Bootstrap checksum at offset 0x10 */
	csum = calc_crc((unsigned char *) buf, 0x10);
	if (csum != le32_to_cpu(buf[0x10/4]))
		goto out;

	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
	if (csum != le32_to_cpu(buf[0xfc/4]))
		goto out;

	kfree(buf);

	buf = tg3_vpd_readblock(tp, &len);
	if (!buf)
		return -ENOMEM;

	i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
	if (i > 0) {
		j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
		if (j < 0)
			goto out;

		if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
			goto out;

		i += PCI_VPD_LRDT_TAG_SIZE;
		j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
					      PCI_VPD_RO_KEYWORD_CHKSUM);
		if (j > 0) {
			u8 csum8 = 0;

			j += PCI_VPD_INFO_FLD_HDR_SIZE;

			for (i = 0; i <= j; i++)
				csum8 += ((u8 *)buf)[i];

			if (csum8)
				goto out;
		}
	}

	err = 0;

out:
	kfree(buf);
	return err;
}

#define TG3_SERDES_TIMEOUT_SEC	2
#define TG3_COPPER_TIMEOUT_SEC	6

static int tg3_test_link(struct tg3 *tp)
{
	int i, max;

	if (!netif_running(tp->dev))
		return -ENODEV;

	if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
		max = TG3_SERDES_TIMEOUT_SEC;
	else
		max = TG3_COPPER_TIMEOUT_SEC;

	for (i = 0; i < max; i++) {
		if (tp->link_up)
			return 0;

		if (msleep_interruptible(1000))
			break;
	}

	return -EIO;
}

/* Only test the commonly used registers */
static int tg3_test_registers(struct tg3 *tp)
{
	int i, is_5705, is_5750;
	u32 offset, read_mask, write_mask, val, save_val, read_val;
	static struct {
		u16 offset;
		u16 flags;
#define TG3_FL_5705	0x1
#define TG3_FL_NOT_5705	0x2
#define TG3_FL_NOT_5788	0x4
#define TG3_FL_NOT_5750	0x8
		u32 read_mask;
		u32 write_mask;
	} reg_tbl[] = {
		/* MAC Control Registers */
		{ MAC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00ef6f8c },
		{ MAC_MODE, TG3_FL_5705,
			0x00000000, 0x01ef6b8c },
		{ MAC_STATUS, TG3_FL_NOT_5705,
			0x03800107, 0x00000000 },
		{ MAC_STATUS, TG3_FL_5705,
			0x03800100, 0x00000000 },
		{ MAC_ADDR_0_HIGH, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_ADDR_0_LOW, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_RX_MTU_SIZE, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_TX_MODE, 0x0000,
			0x00000000, 0x00000070 },
		{ MAC_TX_LENGTHS, 0x0000,
			0x00000000, 0x00003fff },
		{ MAC_RX_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x000007fc },
		{ MAC_RX_MODE, TG3_FL_5705,
			0x00000000, 0x000007dc },
		{ MAC_HASH_REG_0, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_1, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_2, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_3, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive Data and Receive BD Initiator Control Registers. */
		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
			0x00000000, 0x00000003 },
		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+0, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+4, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+8, 0x0000,
			0x00000000, 0xffff0002 },
		{ RCVDBDI_STD_BD+0xc, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive BD Initiator Control Registers. */
		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVBDI_STD_THRESH, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },

		/* Host Coalescing Control Registers. */
		{ HOSTCC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00000004 },
		{ HOSTCC_MODE, TG3_FL_5705,
			0x00000000, 0x000000f6 },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },
		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },

		/* Buffer Manager Control Registers. */
		{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
			0x00000000, 0x007fff80 },
		{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
			0x00000000, 0x007fffff },
		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
			0x00000000, 0x0000003f },
		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_MB_HIGH_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },
		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },

		/* Mailbox Registers */
		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
			0x00000000, 0x000007ff },
		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
			0x00000000, 0x000001ff },

		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
	};
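	/* Each entry names a register, gating flags, a mask of read-only
	 * bits whose value must survive writes, and a mask of read/write
	 * bits that must take both all-zeros and all-ones; a 0xffff
	 * offset terminates the table. E.g. MAC_ADDR_0_LOW is fully
	 * writable (write_mask 0xffffffff), while the NIC-address mirrors
	 * are treated as entirely read-only (read_mask 0xffffffff).
	 */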

	is_5705 = is_5750 = 0;
	if (tg3_flag(tp, 5705_PLUS)) {
		is_5705 = 1;
		if (tg3_flag(tp, 5750_PLUS))
			is_5750 = 1;
	}

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
			continue;

		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
			continue;

		if (tg3_flag(tp, IS_5788) &&
		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
			continue;

		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
			continue;

		offset = (u32) reg_tbl[i].offset;
		read_mask = reg_tbl[i].read_mask;
		write_mask = reg_tbl[i].write_mask;

		/* Save the original register content */
		save_val = tr32(offset);

		/* Determine the read-only value. */
		read_val = save_val & read_mask;

		/* Write zero to the register, then make sure the read-only bits
		 * are not changed and the read/write bits are all zeros.
		 */
		tw32(offset, 0);

		val = tr32(offset);

		/* Test the read-only and read/write bits. */
		if (((val & read_mask) != read_val) || (val & write_mask))
			goto out;

		/* Write ones to all the bits defined by RdMask and WrMask, then
		 * make sure the read-only bits are not changed and the
		 * read/write bits are all ones.
		 */
		tw32(offset, read_mask | write_mask);

		val = tr32(offset);

		/* Test the read-only bits. */
		if ((val & read_mask) != read_val)
			goto out;

		/* Test the read/write bits. */
		if ((val & write_mask) != write_mask)
			goto out;

		tw32(offset, save_val);
	}

	return 0;

out:
	if (netif_msg_hw(tp))
		netdev_err(tp->dev,
			   "Register test failed at offset %x\n", offset);
	tw32(offset, save_val);
	return -EIO;
}

static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
{
	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
	int i;
	u32 j;

	for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
		for (j = 0; j < len; j += 4) {
			u32 val;

			tg3_write_mem(tp, offset + j, test_pattern[i]);
			tg3_read_mem(tp, offset + j, &val);
			if (val != test_pattern[i])
				return -EIO;
		}
	}
	return 0;
}

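/* Per-ASIC tables of on-chip memory windows to exercise, expressed as
 * { offset, len } pairs and terminated by an offset of 0xffffffff.
 */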
static int tg3_test_memory(struct tg3 *tp)
{
	static struct mem_entry {
		u32 offset;
		u32 len;
	} mem_tbl_570x[] = {
		{ 0x00000000, 0x00b50},
		{ 0x00002000, 0x1c000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5705[] = {
		{ 0x00000100, 0x0000c},
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x01000},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0e000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5755[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x00800},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0c000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5906[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00400},
		{ 0x00006000, 0x00400},
		{ 0x00008000, 0x01000},
		{ 0x00010000, 0x01000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5717[] = {
		{ 0x00000200, 0x00008},
		{ 0x00010000, 0x0a000},
		{ 0x00020000, 0x13c00},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_57765[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x09800},
		{ 0x00010000, 0x0a000},
		{ 0xffffffff, 0x00000}
	};
	struct mem_entry *mem_tbl;
	int err = 0;
	int i;

	if (tg3_flag(tp, 5717_PLUS))
		mem_tbl = mem_tbl_5717;
	else if (tg3_flag(tp, 57765_CLASS) ||
		 tg3_asic_rev(tp) == ASIC_REV_5762)
		mem_tbl = mem_tbl_57765;
	else if (tg3_flag(tp, 5755_PLUS))
		mem_tbl = mem_tbl_5755;
	else if (tg3_asic_rev(tp) == ASIC_REV_5906)
		mem_tbl = mem_tbl_5906;
	else if (tg3_flag(tp, 5705_PLUS))
		mem_tbl = mem_tbl_5705;
	else
		mem_tbl = mem_tbl_570x;

	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
		err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
		if (err)
			break;
	}

	return err;
}

#define TG3_TSO_MSS		500

#define TG3_TSO_IP_HDR_LEN	20
#define TG3_TSO_TCP_HDR_LEN	20
#define TG3_TSO_TCP_OPT_LEN	12

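/* Template for the TSO loopback frame, starting at the EtherType:
 * 0x08 0x00 (IPv4), then a 20-byte IPv4 header (0x45, protocol
 * 0x06/TCP, 10.0.0.1 -> 10.0.0.2, tot_len patched at run time), then
 * a TCP header whose 0x80 data-offset nibble (8 words = 32 bytes)
 * accounts for the 12 bytes of options in TG3_TSO_TCP_OPT_LEN.
 */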
static const u8 tg3_tso_header[] = {
	0x08, 0x00,
	0x45, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x40, 0x00,
	0x40, 0x06, 0x00, 0x00,
	0x0a, 0x00, 0x00, 0x01,
	0x0a, 0x00, 0x00, 0x02,
	0x0d, 0x00, 0xe0, 0x00,
	0x00, 0x00, 0x01, 0x00,
	0x00, 0x00, 0x02, 0x00,
	0x80, 0x10, 0x10, 0x00,
	0x14, 0x09, 0x00, 0x00,
	0x01, 0x01, 0x08, 0x0a,
	0x11, 0x11, 0x11, 0x11,
	0x11, 0x11, 0x11, 0x11,
};

static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
{
	u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
	u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
	u32 budget;
	struct sk_buff *skb;
	u8 *tx_data, *rx_data;
	dma_addr_t map;
	int num_pkts, tx_len, rx_len, i, err;
	struct tg3_rx_buffer_desc *desc;
	struct tg3_napi *tnapi, *rnapi;
	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;

	tnapi = &tp->napi[0];
	rnapi = &tp->napi[0];
	if (tp->irq_cnt > 1) {
		if (tg3_flag(tp, ENABLE_RSS))
			rnapi = &tp->napi[1];
		if (tg3_flag(tp, ENABLE_TSS))
			tnapi = &tp->napi[1];
	}
	coal_now = tnapi->coal_now | rnapi->coal_now;

	err = -EIO;

	tx_len = pktsz;
	skb = netdev_alloc_skb(tp->dev, tx_len);
	if (!skb)
		return -ENOMEM;

	tx_data = skb_put(skb, tx_len);
	memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
	memset(tx_data + ETH_ALEN, 0x0, 8);

	tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);

	if (tso_loopback) {
		struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];

		u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
			      TG3_TSO_TCP_OPT_LEN;

		memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
		       sizeof(tg3_tso_header));
		mss = TG3_TSO_MSS;

		val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
		num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);

		/* Set the total length field in the IP header */
		iph->tot_len = htons((u16)(mss + hdr_len));

		base_flags = (TXD_FLAG_CPU_PRE_DMA |
			      TXD_FLAG_CPU_POST_DMA);

		if (tg3_flag(tp, HW_TSO_1) ||
		    tg3_flag(tp, HW_TSO_2) ||
		    tg3_flag(tp, HW_TSO_3)) {
			struct tcphdr *th;
			val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
			th = (struct tcphdr *)&tx_data[val];
			th->check = 0;
		} else
			base_flags |= TXD_FLAG_TCPUDP_CSUM;

		if (tg3_flag(tp, HW_TSO_3)) {
			mss |= (hdr_len & 0xc) << 12;
			if (hdr_len & 0x10)
				base_flags |= 0x00000010;
			base_flags |= (hdr_len & 0x3e0) << 5;
		} else if (tg3_flag(tp, HW_TSO_2))
			mss |= hdr_len << 9;
		else if (tg3_flag(tp, HW_TSO_1) ||
			 tg3_asic_rev(tp) == ASIC_REV_5705) {
			mss |= (TG3_TSO_TCP_OPT_LEN << 9);
		} else {
			base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
		}
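
		/* For HW_TSO_3, the 52-byte header length used here
		 * (hdr_len = 0x34) is scattered across the descriptor
		 * fields by the arithmetic above: (0x34 & 0xc) << 12
		 * lands in mss, bit 0x10 sets a base_flags bit, and
		 * (0x34 & 0x3e0) << 5 supplies the upper bits.
		 */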

		data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
	} else {
		num_pkts = 1;
		data_off = ETH_HLEN;

		if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
		    tx_len > VLAN_ETH_FRAME_LEN)
			base_flags |= TXD_FLAG_JMB_PKT;
	}

	for (i = data_off; i < tx_len; i++)
		tx_data[i] = (u8) (i & 0xff);

	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(tp->pdev, map)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	val = tnapi->tx_prod;
	tnapi->tx_buffers[val].skb = skb;
	dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);

	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       rnapi->coal_now);

	udelay(10);

	rx_start_idx = rnapi->hw_status->idx[0].rx_producer;

	budget = tg3_tx_avail(tnapi);
	if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
			    base_flags | TXD_FLAG_END, mss, 0)) {
		tnapi->tx_buffers[val].skb = NULL;
		dev_kfree_skb(skb);
		return -EIO;
	}

	tnapi->tx_prod++;

	/* Sync BD data before updating mailbox */
	wmb();

	tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
	tr32_mailbox(tnapi->prodmbox);

	udelay(10);

	/* Poll for up to 350 usec (35 x 10 usec) to allow enough time
	 * on some 10/100 Mbps devices.
	 */
	for (i = 0; i < 35; i++) {
		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
		       coal_now);

		udelay(10);

		tx_idx = tnapi->hw_status->idx[0].tx_consumer;
		rx_idx = rnapi->hw_status->idx[0].rx_producer;
		if ((tx_idx == tnapi->tx_prod) &&
		    (rx_idx == (rx_start_idx + num_pkts)))
			break;
	}

	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
	dev_kfree_skb(skb);

	if (tx_idx != tnapi->tx_prod)
		goto out;

	if (rx_idx != rx_start_idx + num_pkts)
		goto out;

	val = data_off;
	while (rx_idx != rx_start_idx) {
		desc = &rnapi->rx_rcb[rx_start_idx++];
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;

		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
			goto out;

		rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
			 - ETH_FCS_LEN;

		if (!tso_loopback) {
			if (rx_len != tx_len)
				goto out;

			if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
				if (opaque_key != RXD_OPAQUE_RING_STD)
					goto out;
			} else {
				if (opaque_key != RXD_OPAQUE_RING_JUMBO)
					goto out;
			}
		} else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
			   (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
			    >> RXD_TCPCSUM_SHIFT != 0xffff) {
			goto out;
		}

		if (opaque_key == RXD_OPAQUE_RING_STD) {
			rx_data = tpr->rx_std_buffers[desc_idx].data;
			map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
					     mapping);
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			rx_data = tpr->rx_jmb_buffers[desc_idx].data;
			map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
					     mapping);
		} else
			goto out;

		pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
					    PCI_DMA_FROMDEVICE);

		rx_data += TG3_RX_OFFSET(tp);
		for (i = data_off; i < rx_len; i++, val++) {
			if (*(rx_data + i) != (u8) (val & 0xff))
				goto out;
		}
	}

	err = 0;

	/* tg3_free_rings will unmap and free the rx_data */
out:
	return err;
}

#define TG3_STD_LOOPBACK_FAILED		1
#define TG3_JMB_LOOPBACK_FAILED		2
#define TG3_TSO_LOOPBACK_FAILED		4
#define TG3_LOOPBACK_FAILED \
	(TG3_STD_LOOPBACK_FAILED | \
	 TG3_JMB_LOOPBACK_FAILED | \
	 TG3_TSO_LOOPBACK_FAILED)
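
/* Each ethtool test-result slot holds an OR of these bits for one
 * loopback mode (MAC, PHY or external), so a single u64 reports which
 * packet types (standard, jumbo, TSO) failed for that mode.
 */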
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13639)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13640) static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13641) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13642) int err = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13643) u32 eee_cap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13644) u32 jmb_pkt_sz = 9000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13645)
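	/* Clamp the jumbo loopback frame to the DMA segment limit, if any. */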
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13646) if (tp->dma_limit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13647) jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13648)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13649) eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13650) tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13651)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13652) if (!netif_running(tp->dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13653) data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13654) data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13655) if (do_extlpbk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13656) data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13657) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13658) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13659)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13660) err = tg3_reset_hw(tp, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13661) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13662) data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13663) data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13664) if (do_extlpbk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13665) data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13666) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13667) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13668)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13669) if (tg3_flag(tp, ENABLE_RSS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13670) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13671)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13672) /* Reroute all rx packets to the 1st queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13673) for (i = MAC_RSS_INDIR_TBL_0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13674) i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13675) tw32(i, 0x0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13676) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13677)
	/* HW errata - MAC loopback fails in some cases on the 5780.
	 * Normal traffic and PHY loopback are not affected by the
	 * errata.  Also, the MAC loopback test is deprecated for
	 * all newer ASIC revisions.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13683) if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13684) !tg3_flag(tp, CPMU_PRESENT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13685) tg3_mac_loopback(tp, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13686)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13687) if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13688) data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13689)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13690) if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13691) tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13692) data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13693)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13694) tg3_mac_loopback(tp, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13695) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13696)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13697) if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13698) !tg3_flag(tp, USE_PHYLIB)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13699) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13700)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13701) tg3_phy_lpbk_set(tp, 0, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13702)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13703) /* Wait for link */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13704) for (i = 0; i < 100; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13705) if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13706) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13707) mdelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13708) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13709)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13710) if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13711) data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13712) if (tg3_flag(tp, TSO_CAPABLE) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13713) tg3_run_loopback(tp, ETH_FRAME_LEN, true))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13714) data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13715) if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13716) tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13717) data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13718)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13719) if (do_extlpbk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13720) tg3_phy_lpbk_set(tp, 0, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13721)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13722) /* All link indications report up, but the hardware
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13723) * isn't really ready for about 20 msec. Double it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13724) * to be sure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13725) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13726) mdelay(40);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13727)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13728) if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13729) data[TG3_EXT_LOOPB_TEST] |=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13730) TG3_STD_LOOPBACK_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13731) if (tg3_flag(tp, TSO_CAPABLE) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13732) tg3_run_loopback(tp, ETH_FRAME_LEN, true))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13733) data[TG3_EXT_LOOPB_TEST] |=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13734) TG3_TSO_LOOPBACK_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13735) if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13736) tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13737) data[TG3_EXT_LOOPB_TEST] |=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13738) TG3_JMB_LOOPBACK_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13739) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13740)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13741) /* Re-enable gphy autopowerdown. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13742) if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13743) tg3_phy_toggle_apd(tp, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13744) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13745)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13746) err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13747) data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13748)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13749) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13750) tp->phy_flags |= eee_cap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13751)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13752) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13753) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13754)
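/* ethtool self-test entry point (ethtool -t).  The NVRAM and link tests
 * run online; ETH_TEST_FL_OFFLINE additionally halts the chip for the
 * register, memory, loopback and interrupt tests, then restarts it.
 */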
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13755) static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13756) u64 *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13757) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13758) struct tg3 *tp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13759) bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13760)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13761) if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13762) if (tg3_power_up(tp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13763) etest->flags |= ETH_TEST_FL_FAILED;
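			/* Any nonzero value in data[] marks that test as
			 * failed to ethtool; bytewise 1s are fine here.
			 */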
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13764) memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13765) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13766) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13767) tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13768) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13769)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13770) memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13771)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13772) if (tg3_test_nvram(tp) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13773) etest->flags |= ETH_TEST_FL_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13774) data[TG3_NVRAM_TEST] = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13775) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13776) if (!doextlpbk && tg3_test_link(tp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13777) etest->flags |= ETH_TEST_FL_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13778) data[TG3_LINK_TEST] = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13779) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13780) if (etest->flags & ETH_TEST_FL_OFFLINE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13781) int err, err2 = 0, irq_sync = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13782)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13783) if (netif_running(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13784) tg3_phy_stop(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13785) tg3_netif_stop(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13786) irq_sync = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13787) }
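		/* irq_sync is set when the NIC was running; it makes
		 * tg3_full_lock() quiesce interrupts first so nothing
		 * races the offline tests below.
		 */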
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13788)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13789) tg3_full_lock(tp, irq_sync);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13790) tg3_halt(tp, RESET_KIND_SUSPEND, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13791) err = tg3_nvram_lock(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13792) tg3_halt_cpu(tp, RX_CPU_BASE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13793) if (!tg3_flag(tp, 5705_PLUS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13794) tg3_halt_cpu(tp, TX_CPU_BASE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13795) if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13796) tg3_nvram_unlock(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13798) if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13799) tg3_phy_reset(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13800)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13801) if (tg3_test_registers(tp) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13802) etest->flags |= ETH_TEST_FL_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13803) data[TG3_REGISTER_TEST] = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13804) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13805)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13806) if (tg3_test_memory(tp) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13807) etest->flags |= ETH_TEST_FL_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13808) data[TG3_MEMORY_TEST] = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13809) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13810)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13811) if (doextlpbk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13812) etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13814) if (tg3_test_loopback(tp, data, doextlpbk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13815) etest->flags |= ETH_TEST_FL_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13816)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13817) tg3_full_unlock(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13819) if (tg3_test_interrupt(tp) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13820) etest->flags |= ETH_TEST_FL_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13821) data[TG3_INTERRUPT_TEST] = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13822) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13824) tg3_full_lock(tp, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13825)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13826) tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13827) if (netif_running(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13828) tg3_flag_set(tp, INIT_COMPLETE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13829) err2 = tg3_restart_hw(tp, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13830) if (!err2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13831) tg3_netif_start(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13832) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13834) tg3_full_unlock(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13835)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13836) if (irq_sync && !err2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13837) tg3_phy_start(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13838) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13839) if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13840) tg3_power_down_prepare(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13842) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13843)
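/* SIOCSHWTSTAMP handler: translate the user's hwtstamp_config into the
 * TG3_RX_PTP_CTL filter bits and the TX_TSTAMP_EN flag, then echo the
 * accepted configuration back to user space.
 */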
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13844) static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13845) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13846) struct tg3 *tp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13847) struct hwtstamp_config stmpconf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13848)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13849) if (!tg3_flag(tp, PTP_CAPABLE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13850) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13852) if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13853) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13855) if (stmpconf.flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13856) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13858) if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13859) stmpconf.tx_type != HWTSTAMP_TX_OFF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13860) return -ERANGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13862) switch (stmpconf.rx_filter) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13863) case HWTSTAMP_FILTER_NONE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13864) tp->rxptpctl = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13865) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13866) case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13867) tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13868) TG3_RX_PTP_CTL_ALL_V1_EVENTS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13869) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13870) case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13871) tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13872) TG3_RX_PTP_CTL_SYNC_EVNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13873) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13874) case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13875) tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13876) TG3_RX_PTP_CTL_DELAY_REQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13877) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13878) case HWTSTAMP_FILTER_PTP_V2_EVENT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13879) tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13880) TG3_RX_PTP_CTL_ALL_V2_EVENTS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13881) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13882) case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13883) tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13884) TG3_RX_PTP_CTL_ALL_V2_EVENTS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13885) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13886) case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13887) tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13888) TG3_RX_PTP_CTL_ALL_V2_EVENTS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13889) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13890) case HWTSTAMP_FILTER_PTP_V2_SYNC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13891) tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13892) TG3_RX_PTP_CTL_SYNC_EVNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13893) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13894) case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13895) tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13896) TG3_RX_PTP_CTL_SYNC_EVNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13897) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13898) case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13899) tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13900) TG3_RX_PTP_CTL_SYNC_EVNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13901) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13902) case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13903) tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13904) TG3_RX_PTP_CTL_DELAY_REQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13905) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13906) case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13907) tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13908) TG3_RX_PTP_CTL_DELAY_REQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13909) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13910) case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13911) tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13912) TG3_RX_PTP_CTL_DELAY_REQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13913) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13914) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13915) return -ERANGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13916) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13918) if (netif_running(dev) && tp->rxptpctl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13919) tw32(TG3_RX_PTP_CTL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13920) tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13922) if (stmpconf.tx_type == HWTSTAMP_TX_ON)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13923) tg3_flag_set(tp, TX_TSTAMP_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13924) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13925) tg3_flag_clear(tp, TX_TSTAMP_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13926)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13927) return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13928) -EFAULT : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13929) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13930)
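/* A minimal user-space sketch of exercising the handler above (assumes a
 * hypothetical "eth0" tg3 interface and an open AF_INET datagram socket
 * fd; illustration only, not part of the driver):
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *	};
 *	struct ifreq ifr;
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	strncpy(ifr.ifr_name, "eth0", sizeof(ifr.ifr_name) - 1);
 *	ifr.ifr_data = (void *)&cfg;
 *	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
 *		perror("SIOCSHWTSTAMP");
 */
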
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13931) static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13932) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13933) struct tg3 *tp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13934) struct hwtstamp_config stmpconf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13935)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13936) if (!tg3_flag(tp, PTP_CAPABLE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13937) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13939) stmpconf.flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13940) stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13941) HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13942)
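	/* Reverse of the mapping in tg3_hwtstamp_set(): rxptpctl was set
	 * from exactly one filter case, so it decodes unambiguously.
	 */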
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13943) switch (tp->rxptpctl) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13944) case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13945) stmpconf.rx_filter = HWTSTAMP_FILTER_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13946) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13947) case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13948) stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13949) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13950) case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13951) stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13952) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13953) case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13954) stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13955) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13956) case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13957) stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13958) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13959) case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13960) stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13961) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13962) case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13963) stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13964) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13965) case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13966) stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13967) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13968) case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13969) stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13970) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13971) case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13972) stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13973) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13974) case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13975) stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13976) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13977) case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13978) stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13979) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13980) case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13981) stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13982) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13983) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13984) WARN_ON_ONCE(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13985) return -ERANGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13986) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13987)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13988) return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13989) -EFAULT : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13990) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13992) static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13993) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13994) struct mii_ioctl_data *data = if_mii(ifr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13995) struct tg3 *tp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13996) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13997)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13998) if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;

		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14002) phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14003) return phy_mii_ioctl(phydev, ifr, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14004) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14005)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14006) switch (cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14007) case SIOCGMIIPHY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14008) data->phy_id = tp->phy_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14009)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14010) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14011) case SIOCGMIIREG: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14012) u32 mii_regval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14013)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14014) if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14015) break; /* We have no PHY */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14017) if (!netif_running(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14018) return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14019)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14020) spin_lock_bh(&tp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14021) err = __tg3_readphy(tp, data->phy_id & 0x1f,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14022) data->reg_num & 0x1f, &mii_regval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14023) spin_unlock_bh(&tp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14025) data->val_out = mii_regval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14027) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14028) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14029)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14030) case SIOCSMIIREG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14031) if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14032) break; /* We have no PHY */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14033)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14034) if (!netif_running(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14035) return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14036)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14037) spin_lock_bh(&tp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14038) err = __tg3_writephy(tp, data->phy_id & 0x1f,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14039) data->reg_num & 0x1f, data->val_in);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14040) spin_unlock_bh(&tp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14041)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14042) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14044) case SIOCSHWTSTAMP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14045) return tg3_hwtstamp_set(dev, ifr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14047) case SIOCGHWTSTAMP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14048) return tg3_hwtstamp_get(dev, ifr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14049)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14050) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14051) /* do nothing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14052) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14053) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14054) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14055) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14056)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14057) static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14058) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14059) struct tg3 *tp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14061) memcpy(ec, &tp->coal, sizeof(*ec));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14062) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14063) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14065) static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14066) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14067) struct tg3 *tp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14068) u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14069) u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14070)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14071) if (!tg3_flag(tp, 5705_PLUS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14072) max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14073) max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14074) max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14075) min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14076) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14077)
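	/* On 5705+ chips the IRQ-context and stats-block limits above stay
	 * zero, so any nonzero request for those parameters is rejected.
	 */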
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14078) if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14079) (!ec->rx_coalesce_usecs) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14080) (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14081) (!ec->tx_coalesce_usecs) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14082) (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14083) (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14084) (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14085) (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14086) (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14087) (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14088) (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14089) (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14090) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14091)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14092) /* Only copy relevant parameters, ignore all others. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14093) tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14094) tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14095) tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14096) tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14097) tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14098) tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14099) tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14100) tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14101) tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14103) if (netif_running(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14104) tg3_full_lock(tp, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14105) __tg3_set_coalesce(tp, &tp->coal);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14106) tg3_full_unlock(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14107) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14108) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14109) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14111) static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14112) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14113) struct tg3 *tp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14114)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14115) if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14116) netdev_warn(tp->dev, "Board does not support EEE!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14117) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14118) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14120) if (edata->advertised != tp->eee.advertised) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14121) netdev_warn(tp->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14122) "Direct manipulation of EEE advertisement is not supported\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14123) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14124) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14125)
	if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) {
		netdev_warn(tp->dev,
			    "Maximum supported Tx LPI timer is %#x\n",
			    TG3_CPMU_DBTMR1_LNKIDLE_MAX);
		return -EINVAL;
	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14133) tp->eee = *edata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14135) tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14136) tg3_warn_mgmt_link_flap(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14138) if (netif_running(tp->dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14139) tg3_full_lock(tp, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14140) tg3_setup_eee(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14141) tg3_phy_reset(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14142) tg3_full_unlock(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14143) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14145) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14146) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14147)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14148) static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14149) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14150) struct tg3 *tp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14152) if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14153) netdev_warn(tp->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14154) "Board does not support EEE!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14155) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14156) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14158) *edata = tp->eee;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14159) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14160) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14162) static const struct ethtool_ops tg3_ethtool_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14163) .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14164) ETHTOOL_COALESCE_MAX_FRAMES |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14165) ETHTOOL_COALESCE_USECS_IRQ |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14166) ETHTOOL_COALESCE_MAX_FRAMES_IRQ |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14167) ETHTOOL_COALESCE_STATS_BLOCK_USECS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14168) .get_drvinfo = tg3_get_drvinfo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14169) .get_regs_len = tg3_get_regs_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14170) .get_regs = tg3_get_regs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14171) .get_wol = tg3_get_wol,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14172) .set_wol = tg3_set_wol,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14173) .get_msglevel = tg3_get_msglevel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14174) .set_msglevel = tg3_set_msglevel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14175) .nway_reset = tg3_nway_reset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14176) .get_link = ethtool_op_get_link,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14177) .get_eeprom_len = tg3_get_eeprom_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14178) .get_eeprom = tg3_get_eeprom,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14179) .set_eeprom = tg3_set_eeprom,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14180) .get_ringparam = tg3_get_ringparam,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14181) .set_ringparam = tg3_set_ringparam,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14182) .get_pauseparam = tg3_get_pauseparam,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14183) .set_pauseparam = tg3_set_pauseparam,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14184) .self_test = tg3_self_test,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14185) .get_strings = tg3_get_strings,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14186) .set_phys_id = tg3_set_phys_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14187) .get_ethtool_stats = tg3_get_ethtool_stats,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14188) .get_coalesce = tg3_get_coalesce,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14189) .set_coalesce = tg3_set_coalesce,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14190) .get_sset_count = tg3_get_sset_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14191) .get_rxnfc = tg3_get_rxnfc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14192) .get_rxfh_indir_size = tg3_get_rxfh_indir_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14193) .get_rxfh = tg3_get_rxfh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14194) .set_rxfh = tg3_set_rxfh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14195) .get_channels = tg3_get_channels,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14196) .set_channels = tg3_set_channels,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14197) .get_ts_info = tg3_get_ts_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14198) .get_eee = tg3_get_eee,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14199) .set_eee = tg3_set_eee,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14200) .get_link_ksettings = tg3_get_link_ksettings,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14201) .set_link_ksettings = tg3_set_link_ksettings,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14202) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14204) static void tg3_get_stats64(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14205) struct rtnl_link_stats64 *stats)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14206) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14207) struct tg3 *tp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14208)
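	/* If the chip is halted or not yet initialized, the hardware
	 * stats block is invalid; report the last saved snapshot.
	 */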
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14209) spin_lock_bh(&tp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14210) if (!tp->hw_stats || !tg3_flag(tp, INIT_COMPLETE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14211) *stats = tp->net_stats_prev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14212) spin_unlock_bh(&tp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14213) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14214) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14216) tg3_get_nstats(tp, stats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14217) spin_unlock_bh(&tp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14218) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14220) static void tg3_set_rx_mode(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14221) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14222) struct tg3 *tp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14223)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14224) if (!netif_running(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14225) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14226)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14227) tg3_full_lock(tp, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14228) __tg3_set_rx_mode(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14229) tg3_full_unlock(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14230) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14231)
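/* 5780-class chips cannot use TSO and jumbo frames at the same time, so
 * TSO capability is dropped above ETH_DATA_LEN and restored at standard
 * MTU; all other jumbo-capable chips simply toggle the jumbo rx ring.
 */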
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14232) static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14233) int new_mtu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14234) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14235) dev->mtu = new_mtu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14237) if (new_mtu > ETH_DATA_LEN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14238) if (tg3_flag(tp, 5780_CLASS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14239) netdev_update_features(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14240) tg3_flag_clear(tp, TSO_CAPABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14241) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14242) tg3_flag_set(tp, JUMBO_RING_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14243) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14244) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14245) if (tg3_flag(tp, 5780_CLASS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14246) tg3_flag_set(tp, TSO_CAPABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14247) netdev_update_features(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14248) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14249) tg3_flag_clear(tp, JUMBO_RING_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14250) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14251) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14252)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14253) static int tg3_change_mtu(struct net_device *dev, int new_mtu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14254) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14255) struct tg3 *tp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14256) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14257) bool reset_phy = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14259) if (!netif_running(dev)) {
		/* Record the new MTU now; it will take effect when
		 * the device is next brought up.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14263) tg3_set_mtu(dev, tp, new_mtu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14264) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14265) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14266)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14267) tg3_phy_stop(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14268)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14269) tg3_netif_stop(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14270)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14271) tg3_set_mtu(dev, tp, new_mtu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14272)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14273) tg3_full_lock(tp, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14275) tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14276)
	/* Reset the PHY, otherwise the read DMA engine will be left in a
	 * mode that breaks all DMA read requests down to 256 bytes.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14280) if (tg3_asic_rev(tp) == ASIC_REV_57766 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14281) tg3_asic_rev(tp) == ASIC_REV_5717 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14282) tg3_asic_rev(tp) == ASIC_REV_5719 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14283) tg3_asic_rev(tp) == ASIC_REV_5720)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14284) reset_phy = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14285)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14286) err = tg3_restart_hw(tp, reset_phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14288) if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14289) tg3_netif_start(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14291) tg3_full_unlock(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14293) if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14294) tg3_phy_start(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14296) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14297) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14299) static const struct net_device_ops tg3_netdev_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14300) .ndo_open = tg3_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14301) .ndo_stop = tg3_close,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14302) .ndo_start_xmit = tg3_start_xmit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14303) .ndo_get_stats64 = tg3_get_stats64,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14304) .ndo_validate_addr = eth_validate_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14305) .ndo_set_rx_mode = tg3_set_rx_mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14306) .ndo_set_mac_address = tg3_set_mac_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14307) .ndo_do_ioctl = tg3_ioctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14308) .ndo_tx_timeout = tg3_tx_timeout,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14309) .ndo_change_mtu = tg3_change_mtu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14310) .ndo_fix_features = tg3_fix_features,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14311) .ndo_set_features = tg3_set_features,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14312) #ifdef CONFIG_NET_POLL_CONTROLLER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14313) .ndo_poll_controller = tg3_poll_controller,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14314) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14315) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14317) static void tg3_get_eeprom_size(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14318) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14319) u32 cursize, val, magic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14320)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14321) tp->nvram_size = EEPROM_CHIP_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14322)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14323) if (tg3_nvram_read(tp, 0, &magic) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14324) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14325)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14326) if ((magic != TG3_EEPROM_MAGIC) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14327) ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14328) ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14329) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14331) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14332) * Size the chip by reading offsets at increasing powers of two.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14333) * When we encounter our validation signature, we know the addressing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14334) * has wrapped around, and thus have our chip size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14335) */
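	/* For example, on a hypothetical 128-byte part the reads at 0x10,
	 * 0x20 and 0x40 return data, while the read at 0x80 wraps to
	 * offset 0 and returns the magic again, giving nvram_size = 0x80.
	 */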
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14336) cursize = 0x10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14337)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14338) while (cursize < tp->nvram_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14339) if (tg3_nvram_read(tp, cursize, &val) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14340) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14341)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14342) if (val == magic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14343) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14344)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14345) cursize <<= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14346) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14347)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14348) tp->nvram_size = cursize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14349) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14350)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14351) static void tg3_get_nvram_size(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14352) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14353) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14354)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14355) if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14356) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14357)
	/* Selfboot format: no standard magic word, so probe the EEPROM
	 * size directly.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14359) if (val != TG3_EEPROM_MAGIC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14360) tg3_get_eeprom_size(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14361) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14362) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14363)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14364) if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14365) if (val != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14366) /* This is confusing. We want to operate on the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14367) * 16-bit value at offset 0xf2. The tg3_nvram_read()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14368) * call will read from NVRAM and byteswap the data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14369) * according to the byteswapping settings for all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14370) * other register accesses. This ensures the data we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14371) * want will always reside in the lower 16-bits.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14372) * However, the data in NVRAM is in LE format, which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14373) * means the data from the NVRAM read will always be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14374) * opposite the endianness of the CPU. The 16-bit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14375) * byteswap then brings the data to CPU endianness.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14376) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14377) tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14378) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14379) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14380) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14381) tp->nvram_size = TG3_NVRAM_SIZE_512KB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14382) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14383)
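/* Decode NVRAM_CFG1 to identify the attached part.  On 5750/5780-class
 * devices the vendor field selects the profile; everything else defaults
 * to the Atmel AT45DB0X1B layout.
 */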
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14384) static void tg3_get_nvram_info(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14385) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14386) u32 nvcfg1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14387)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14388) nvcfg1 = tr32(NVRAM_CFG1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14389) if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14390) tg3_flag_set(tp, FLASH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14391) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14392) nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14393) tw32(NVRAM_CFG1, nvcfg1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14394) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14395)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14396) if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14397) tg3_flag(tp, 5780_CLASS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14398) switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14399) case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14400) tp->nvram_jedecnum = JEDEC_ATMEL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14401) tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14402) tg3_flag_set(tp, NVRAM_BUFFERED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14403) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14404) case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14405) tp->nvram_jedecnum = JEDEC_ATMEL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14406) tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14407) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14408) case FLASH_VENDOR_ATMEL_EEPROM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14409) tp->nvram_jedecnum = JEDEC_ATMEL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14410) tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14411) tg3_flag_set(tp, NVRAM_BUFFERED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14412) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14413) case FLASH_VENDOR_ST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14414) tp->nvram_jedecnum = JEDEC_ST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14415) tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14416) tg3_flag_set(tp, NVRAM_BUFFERED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14417) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14418) case FLASH_VENDOR_SAIFUN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14419) tp->nvram_jedecnum = JEDEC_SAIFUN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14420) tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14421) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14422) case FLASH_VENDOR_SST_SMALL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14423) case FLASH_VENDOR_SST_LARGE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14424) tp->nvram_jedecnum = JEDEC_SST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14425) tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14426) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14427) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14428) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14429) tp->nvram_jedecnum = JEDEC_ATMEL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14430) tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14431) tg3_flag_set(tp, NVRAM_BUFFERED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14432) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14433) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14434)
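/* Translate the 5752-style page-size strap into bytes.  The 264- and
 * 528-byte entries are Atmel DataFlash geometries (256 + 8 and
 * 512 + 16 bytes per page); callers use exactly those two values to
 * decide whether DataFlash address translation is needed (see the
 * NO_NVRAM_ADDR_TRANS checks below).
 */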
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14435) static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14436) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14437) switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14438) case FLASH_5752PAGE_SIZE_256:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14439) tp->nvram_pagesize = 256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14440) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14441) case FLASH_5752PAGE_SIZE_512:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14442) tp->nvram_pagesize = 512;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14443) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14444) case FLASH_5752PAGE_SIZE_1K:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14445) tp->nvram_pagesize = 1024;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14446) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14447) case FLASH_5752PAGE_SIZE_2K:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14448) tp->nvram_pagesize = 2048;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14449) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14450) case FLASH_5752PAGE_SIZE_4K:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14451) tp->nvram_pagesize = 4096;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14452) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14453) case FLASH_5752PAGE_SIZE_264:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14454) tp->nvram_pagesize = 264;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14455) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14456) case FLASH_5752PAGE_SIZE_528:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14457) tp->nvram_pagesize = 528;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14458) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14459) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14460) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14461)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14462) static void tg3_get_5752_nvram_info(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14463) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14464) u32 nvcfg1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14465)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14466) nvcfg1 = tr32(NVRAM_CFG1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14467)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14468) /* NVRAM protection for TPM */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14469) if (nvcfg1 & (1 << 27))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14470) tg3_flag_set(tp, PROTECTED_NVRAM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14471)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14472) switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14473) case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14474) case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14475) tp->nvram_jedecnum = JEDEC_ATMEL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14476) tg3_flag_set(tp, NVRAM_BUFFERED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14477) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14478) case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14479) tp->nvram_jedecnum = JEDEC_ATMEL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14480) tg3_flag_set(tp, NVRAM_BUFFERED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14481) tg3_flag_set(tp, FLASH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14482) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14483) case FLASH_5752VENDOR_ST_M45PE10:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14484) case FLASH_5752VENDOR_ST_M45PE20:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14485) case FLASH_5752VENDOR_ST_M45PE40:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14486) tp->nvram_jedecnum = JEDEC_ST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14487) tg3_flag_set(tp, NVRAM_BUFFERED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14488) tg3_flag_set(tp, FLASH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14489) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14490) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14491)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14492) if (tg3_flag(tp, FLASH)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14493) tg3_nvram_get_pagesize(tp, nvcfg1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14494) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14495) /* For eeprom, set pagesize to maximum eeprom size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14496) tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14497)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14498) nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14499) tw32(NVRAM_CFG1, nvcfg1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14500) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14501) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14502)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14503) static void tg3_get_5755_nvram_info(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14504) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14505) u32 nvcfg1, protect = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14506)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14507) nvcfg1 = tr32(NVRAM_CFG1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14508)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14509) /* NVRAM protection for TPM */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14510) if (nvcfg1 & (1 << 27)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14511) tg3_flag_set(tp, PROTECTED_NVRAM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14512) protect = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14513) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14514)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14515) nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14516) switch (nvcfg1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14517) case FLASH_5755VENDOR_ATMEL_FLASH_1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14518) case FLASH_5755VENDOR_ATMEL_FLASH_2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14519) case FLASH_5755VENDOR_ATMEL_FLASH_3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14520) case FLASH_5755VENDOR_ATMEL_FLASH_5:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14521) tp->nvram_jedecnum = JEDEC_ATMEL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14522) tg3_flag_set(tp, NVRAM_BUFFERED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14523) tg3_flag_set(tp, FLASH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14524) tp->nvram_pagesize = 264;
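		/* With TPM protection strapped on, only part of the chip
		 * remains usable; the 0x3e200/0x1f200 sizes below presumably
		 * reflect that reduced window.
		 */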
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14525) if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14526) nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14527) tp->nvram_size = (protect ? 0x3e200 :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14528) TG3_NVRAM_SIZE_512KB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14529) else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14530) tp->nvram_size = (protect ? 0x1f200 :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14531) TG3_NVRAM_SIZE_256KB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14532) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14533) tp->nvram_size = (protect ? 0x1f200 :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14534) TG3_NVRAM_SIZE_128KB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14535) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14536) case FLASH_5752VENDOR_ST_M45PE10:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14537) case FLASH_5752VENDOR_ST_M45PE20:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14538) case FLASH_5752VENDOR_ST_M45PE40:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14539) tp->nvram_jedecnum = JEDEC_ST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14540) tg3_flag_set(tp, NVRAM_BUFFERED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14541) tg3_flag_set(tp, FLASH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14542) tp->nvram_pagesize = 256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14543) if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14544) tp->nvram_size = (protect ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14545) TG3_NVRAM_SIZE_64KB :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14546) TG3_NVRAM_SIZE_128KB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14547) else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14548) tp->nvram_size = (protect ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14549) TG3_NVRAM_SIZE_64KB :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14550) TG3_NVRAM_SIZE_256KB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14551) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14552) tp->nvram_size = (protect ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14553) TG3_NVRAM_SIZE_128KB :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14554) TG3_NVRAM_SIZE_512KB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14555) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14556) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14557) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14558)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14559) static void tg3_get_5787_nvram_info(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14560) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14561) u32 nvcfg1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14562)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14563) nvcfg1 = tr32(NVRAM_CFG1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14564)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14565) switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14566) case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14567) case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14568) case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14569) case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14570) tp->nvram_jedecnum = JEDEC_ATMEL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14571) tg3_flag_set(tp, NVRAM_BUFFERED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14572) tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14573)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14574) nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14575) tw32(NVRAM_CFG1, nvcfg1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14576) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14577) case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14578) case FLASH_5755VENDOR_ATMEL_FLASH_1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14579) case FLASH_5755VENDOR_ATMEL_FLASH_2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14580) case FLASH_5755VENDOR_ATMEL_FLASH_3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14581) tp->nvram_jedecnum = JEDEC_ATMEL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14582) tg3_flag_set(tp, NVRAM_BUFFERED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14583) tg3_flag_set(tp, FLASH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14584) tp->nvram_pagesize = 264;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14585) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14586) case FLASH_5752VENDOR_ST_M45PE10:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14587) case FLASH_5752VENDOR_ST_M45PE20:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14588) case FLASH_5752VENDOR_ST_M45PE40:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14589) tp->nvram_jedecnum = JEDEC_ST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14590) tg3_flag_set(tp, NVRAM_BUFFERED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14591) tg3_flag_set(tp, FLASH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14592) tp->nvram_pagesize = 256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14593) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14594) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14595) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14596)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14597) static void tg3_get_5761_nvram_info(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14598) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14599) u32 nvcfg1, protect = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14600)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14601) nvcfg1 = tr32(NVRAM_CFG1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14602)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14603) /* NVRAM protection for TPM */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14604) if (nvcfg1 & (1 << 27)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14605) tg3_flag_set(tp, PROTECTED_NVRAM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14606) protect = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14607) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14608)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14609) nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14610) switch (nvcfg1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14611) case FLASH_5761VENDOR_ATMEL_ADB021D:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14612) case FLASH_5761VENDOR_ATMEL_ADB041D:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14613) case FLASH_5761VENDOR_ATMEL_ADB081D:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14614) case FLASH_5761VENDOR_ATMEL_ADB161D:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14615) case FLASH_5761VENDOR_ATMEL_MDB021D:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14616) case FLASH_5761VENDOR_ATMEL_MDB041D:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14617) case FLASH_5761VENDOR_ATMEL_MDB081D:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14618) case FLASH_5761VENDOR_ATMEL_MDB161D:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14619) tp->nvram_jedecnum = JEDEC_ATMEL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14620) tg3_flag_set(tp, NVRAM_BUFFERED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14621) tg3_flag_set(tp, FLASH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14622) tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14623) tp->nvram_pagesize = 256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14624) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14625) case FLASH_5761VENDOR_ST_A_M45PE20:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14626) case FLASH_5761VENDOR_ST_A_M45PE40:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14627) case FLASH_5761VENDOR_ST_A_M45PE80:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14628) case FLASH_5761VENDOR_ST_A_M45PE16:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14629) case FLASH_5761VENDOR_ST_M_M45PE20:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14630) case FLASH_5761VENDOR_ST_M_M45PE40:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14631) case FLASH_5761VENDOR_ST_M_M45PE80:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14632) case FLASH_5761VENDOR_ST_M_M45PE16:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14633) tp->nvram_jedecnum = JEDEC_ST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14634) tg3_flag_set(tp, NVRAM_BUFFERED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14635) tg3_flag_set(tp, FLASH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14636) tp->nvram_pagesize = 256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14637) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14638) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14639)
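	/* For a protected part the accessible size is read back from the
	 * NVRAM_ADDR_LOCKOUT register; only unprotected parts report the
	 * full strap-derived chip size.
	 */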
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14640) if (protect) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14641) tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14642) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14643) switch (nvcfg1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14644) case FLASH_5761VENDOR_ATMEL_ADB161D:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14645) case FLASH_5761VENDOR_ATMEL_MDB161D:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14646) case FLASH_5761VENDOR_ST_A_M45PE16:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14647) case FLASH_5761VENDOR_ST_M_M45PE16:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14648) tp->nvram_size = TG3_NVRAM_SIZE_2MB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14649) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14650) case FLASH_5761VENDOR_ATMEL_ADB081D:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14651) case FLASH_5761VENDOR_ATMEL_MDB081D:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14652) case FLASH_5761VENDOR_ST_A_M45PE80:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14653) case FLASH_5761VENDOR_ST_M_M45PE80:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14654) tp->nvram_size = TG3_NVRAM_SIZE_1MB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14655) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14656) case FLASH_5761VENDOR_ATMEL_ADB041D:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14657) case FLASH_5761VENDOR_ATMEL_MDB041D:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14658) case FLASH_5761VENDOR_ST_A_M45PE40:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14659) case FLASH_5761VENDOR_ST_M_M45PE40:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14660) tp->nvram_size = TG3_NVRAM_SIZE_512KB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14661) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14662) case FLASH_5761VENDOR_ATMEL_ADB021D:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14663) case FLASH_5761VENDOR_ATMEL_MDB021D:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14664) case FLASH_5761VENDOR_ST_A_M45PE20:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14665) case FLASH_5761VENDOR_ST_M_M45PE20:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14666) tp->nvram_size = TG3_NVRAM_SIZE_256KB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14667) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14668) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14669) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14670) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14671)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14672) static void tg3_get_5906_nvram_info(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14673) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14674) tp->nvram_jedecnum = JEDEC_ATMEL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14675) tg3_flag_set(tp, NVRAM_BUFFERED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14676) tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14677) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14678)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14679) static void tg3_get_57780_nvram_info(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14680) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14681) u32 nvcfg1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14682)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14683) nvcfg1 = tr32(NVRAM_CFG1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14684)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14685) switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14686) case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14687) case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14688) tp->nvram_jedecnum = JEDEC_ATMEL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14689) tg3_flag_set(tp, NVRAM_BUFFERED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14690) tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14691)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14692) nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14693) tw32(NVRAM_CFG1, nvcfg1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14694) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14695) case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14696) case FLASH_57780VENDOR_ATMEL_AT45DB011D:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14697) case FLASH_57780VENDOR_ATMEL_AT45DB011B:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14698) case FLASH_57780VENDOR_ATMEL_AT45DB021D:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14699) case FLASH_57780VENDOR_ATMEL_AT45DB021B:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14700) case FLASH_57780VENDOR_ATMEL_AT45DB041D:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14701) case FLASH_57780VENDOR_ATMEL_AT45DB041B:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14702) tp->nvram_jedecnum = JEDEC_ATMEL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14703) tg3_flag_set(tp, NVRAM_BUFFERED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14704) tg3_flag_set(tp, FLASH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14705)
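		/* Second pass over the same strap field to narrow the
		 * Atmel parts down to their individual sizes.
		 */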
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14706) switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14707) case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14708) case FLASH_57780VENDOR_ATMEL_AT45DB011D:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14709) case FLASH_57780VENDOR_ATMEL_AT45DB011B:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14710) tp->nvram_size = TG3_NVRAM_SIZE_128KB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14711) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14712) case FLASH_57780VENDOR_ATMEL_AT45DB021D:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14713) case FLASH_57780VENDOR_ATMEL_AT45DB021B:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14714) tp->nvram_size = TG3_NVRAM_SIZE_256KB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14715) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14716) case FLASH_57780VENDOR_ATMEL_AT45DB041D:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14717) case FLASH_57780VENDOR_ATMEL_AT45DB041B:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14718) tp->nvram_size = TG3_NVRAM_SIZE_512KB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14719) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14720) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14721) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14722) case FLASH_5752VENDOR_ST_M45PE10:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14723) case FLASH_5752VENDOR_ST_M45PE20:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14724) case FLASH_5752VENDOR_ST_M45PE40:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14725) tp->nvram_jedecnum = JEDEC_ST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14726) tg3_flag_set(tp, NVRAM_BUFFERED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14727) tg3_flag_set(tp, FLASH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14728)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14729) switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14730) case FLASH_5752VENDOR_ST_M45PE10:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14731) tp->nvram_size = TG3_NVRAM_SIZE_128KB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14732) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14733) case FLASH_5752VENDOR_ST_M45PE20:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14734) tp->nvram_size = TG3_NVRAM_SIZE_256KB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14735) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14736) case FLASH_5752VENDOR_ST_M45PE40:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14737) tp->nvram_size = TG3_NVRAM_SIZE_512KB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14738) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14739) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14740) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14741) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14742) tg3_flag_set(tp, NO_NVRAM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14743) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14744) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14745)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14746) tg3_nvram_get_pagesize(tp, nvcfg1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14747) if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14748) tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14749) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14750)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14752) static void tg3_get_5717_nvram_info(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14753) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14754) u32 nvcfg1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14755)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14756) nvcfg1 = tr32(NVRAM_CFG1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14757)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14758) switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14759) case FLASH_5717VENDOR_ATMEL_EEPROM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14760) case FLASH_5717VENDOR_MICRO_EEPROM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14761) tp->nvram_jedecnum = JEDEC_ATMEL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14762) tg3_flag_set(tp, NVRAM_BUFFERED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14763) tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14764)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14765) nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14766) tw32(NVRAM_CFG1, nvcfg1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14767) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14768) case FLASH_5717VENDOR_ATMEL_MDB011D:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14769) case FLASH_5717VENDOR_ATMEL_ADB011B:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14770) case FLASH_5717VENDOR_ATMEL_ADB011D:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14771) case FLASH_5717VENDOR_ATMEL_MDB021D:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14772) case FLASH_5717VENDOR_ATMEL_ADB021B:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14773) case FLASH_5717VENDOR_ATMEL_ADB021D:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14774) case FLASH_5717VENDOR_ATMEL_45USPT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14775) tp->nvram_jedecnum = JEDEC_ATMEL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14776) tg3_flag_set(tp, NVRAM_BUFFERED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14777) tg3_flag_set(tp, FLASH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14778)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14779) switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14780) case FLASH_5717VENDOR_ATMEL_MDB021D:
			/* Detect size with tg3_get_nvram_size() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14782) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14783) case FLASH_5717VENDOR_ATMEL_ADB021B:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14784) case FLASH_5717VENDOR_ATMEL_ADB021D:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14785) tp->nvram_size = TG3_NVRAM_SIZE_256KB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14786) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14787) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14788) tp->nvram_size = TG3_NVRAM_SIZE_128KB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14789) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14790) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14791) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14792) case FLASH_5717VENDOR_ST_M_M25PE10:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14793) case FLASH_5717VENDOR_ST_A_M25PE10:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14794) case FLASH_5717VENDOR_ST_M_M45PE10:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14795) case FLASH_5717VENDOR_ST_A_M45PE10:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14796) case FLASH_5717VENDOR_ST_M_M25PE20:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14797) case FLASH_5717VENDOR_ST_A_M25PE20:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14798) case FLASH_5717VENDOR_ST_M_M45PE20:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14799) case FLASH_5717VENDOR_ST_A_M45PE20:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14800) case FLASH_5717VENDOR_ST_25USPT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14801) case FLASH_5717VENDOR_ST_45USPT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14802) tp->nvram_jedecnum = JEDEC_ST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14803) tg3_flag_set(tp, NVRAM_BUFFERED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14804) tg3_flag_set(tp, FLASH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14805)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14806) switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14807) case FLASH_5717VENDOR_ST_M_M25PE20:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14808) case FLASH_5717VENDOR_ST_M_M45PE20:
			/* Detect size with tg3_get_nvram_size() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14810) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14811) case FLASH_5717VENDOR_ST_A_M25PE20:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14812) case FLASH_5717VENDOR_ST_A_M45PE20:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14813) tp->nvram_size = TG3_NVRAM_SIZE_256KB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14814) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14815) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14816) tp->nvram_size = TG3_NVRAM_SIZE_128KB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14817) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14818) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14819) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14820) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14821) tg3_flag_set(tp, NO_NVRAM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14822) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14823) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14824)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14825) tg3_nvram_get_pagesize(tp, nvcfg1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14826) if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14827) tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14828) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14829)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14830) static void tg3_get_5720_nvram_info(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14831) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14832) u32 nvcfg1, nvmpinstrp, nv_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14834) nvcfg1 = tr32(NVRAM_CFG1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14835) nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14836)
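	/* The 5762 reuses the vendor strap field with its own encoding.
	 * Decode the Macronix parts directly, and remap the EEPROM and
	 * multi-size straps onto their 5720 equivalents so the common
	 * switch below can handle them.
	 */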
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14837) if (tg3_asic_rev(tp) == ASIC_REV_5762) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14838) if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14839) tg3_flag_set(tp, NO_NVRAM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14840) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14841) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14842)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14843) switch (nvmpinstrp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14844) case FLASH_5762_MX25L_100:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14845) case FLASH_5762_MX25L_200:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14846) case FLASH_5762_MX25L_400:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14847) case FLASH_5762_MX25L_800:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14848) case FLASH_5762_MX25L_160_320:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14849) tp->nvram_pagesize = 4096;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14850) tp->nvram_jedecnum = JEDEC_MACRONIX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14851) tg3_flag_set(tp, NVRAM_BUFFERED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14852) tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14853) tg3_flag_set(tp, FLASH);
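			/* Autosense encodes the device ID as log2 of the size
			 * in MB: e.g. a devid of 4 yields 1 << 4 = 16 MB,
			 * i.e. 0x1000000 bytes, assuming AUTOSENSE_SIZE_IN_MB
			 * is the MB-to-bytes shift (20).
			 */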
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14854) nv_status = tr32(NVRAM_AUTOSENSE_STATUS);
			tp->nvram_size =
				(1 << ((nv_status >> AUTOSENSE_DEVID) &
				       AUTOSENSE_DEVID_MASK))
				<< AUTOSENSE_SIZE_IN_MB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14859) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14861) case FLASH_5762_EEPROM_HD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14862) nvmpinstrp = FLASH_5720_EEPROM_HD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14863) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14864) case FLASH_5762_EEPROM_LD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14865) nvmpinstrp = FLASH_5720_EEPROM_LD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14866) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14867) case FLASH_5720VENDOR_M_ST_M45PE20:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14868) /* This pinstrap supports multiple sizes, so force it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14869) * to read the actual size from location 0xf0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14870) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14871) nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14872) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14873) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14874) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14876) switch (nvmpinstrp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14877) case FLASH_5720_EEPROM_HD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14878) case FLASH_5720_EEPROM_LD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14879) tp->nvram_jedecnum = JEDEC_ATMEL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14880) tg3_flag_set(tp, NVRAM_BUFFERED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14881)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14882) nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14883) tw32(NVRAM_CFG1, nvcfg1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14884) if (nvmpinstrp == FLASH_5720_EEPROM_HD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14885) tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14886) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14887) tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14888) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14889) case FLASH_5720VENDOR_M_ATMEL_DB011D:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14890) case FLASH_5720VENDOR_A_ATMEL_DB011B:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14891) case FLASH_5720VENDOR_A_ATMEL_DB011D:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14892) case FLASH_5720VENDOR_M_ATMEL_DB021D:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14893) case FLASH_5720VENDOR_A_ATMEL_DB021B:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14894) case FLASH_5720VENDOR_A_ATMEL_DB021D:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14895) case FLASH_5720VENDOR_M_ATMEL_DB041D:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14896) case FLASH_5720VENDOR_A_ATMEL_DB041B:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14897) case FLASH_5720VENDOR_A_ATMEL_DB041D:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14898) case FLASH_5720VENDOR_M_ATMEL_DB081D:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14899) case FLASH_5720VENDOR_A_ATMEL_DB081D:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14900) case FLASH_5720VENDOR_ATMEL_45USPT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14901) tp->nvram_jedecnum = JEDEC_ATMEL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14902) tg3_flag_set(tp, NVRAM_BUFFERED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14903) tg3_flag_set(tp, FLASH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14905) switch (nvmpinstrp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14906) case FLASH_5720VENDOR_M_ATMEL_DB021D:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14907) case FLASH_5720VENDOR_A_ATMEL_DB021B:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14908) case FLASH_5720VENDOR_A_ATMEL_DB021D:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14909) tp->nvram_size = TG3_NVRAM_SIZE_256KB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14910) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14911) case FLASH_5720VENDOR_M_ATMEL_DB041D:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14912) case FLASH_5720VENDOR_A_ATMEL_DB041B:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14913) case FLASH_5720VENDOR_A_ATMEL_DB041D:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14914) tp->nvram_size = TG3_NVRAM_SIZE_512KB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14915) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14916) case FLASH_5720VENDOR_M_ATMEL_DB081D:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14917) case FLASH_5720VENDOR_A_ATMEL_DB081D:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14918) tp->nvram_size = TG3_NVRAM_SIZE_1MB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14919) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14920) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14921) if (tg3_asic_rev(tp) != ASIC_REV_5762)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14922) tp->nvram_size = TG3_NVRAM_SIZE_128KB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14923) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14924) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14925) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14926) case FLASH_5720VENDOR_M_ST_M25PE10:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14927) case FLASH_5720VENDOR_M_ST_M45PE10:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14928) case FLASH_5720VENDOR_A_ST_M25PE10:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14929) case FLASH_5720VENDOR_A_ST_M45PE10:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14930) case FLASH_5720VENDOR_M_ST_M25PE20:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14931) case FLASH_5720VENDOR_M_ST_M45PE20:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14932) case FLASH_5720VENDOR_A_ST_M25PE20:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14933) case FLASH_5720VENDOR_A_ST_M45PE20:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14934) case FLASH_5720VENDOR_M_ST_M25PE40:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14935) case FLASH_5720VENDOR_M_ST_M45PE40:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14936) case FLASH_5720VENDOR_A_ST_M25PE40:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14937) case FLASH_5720VENDOR_A_ST_M45PE40:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14938) case FLASH_5720VENDOR_M_ST_M25PE80:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14939) case FLASH_5720VENDOR_M_ST_M45PE80:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14940) case FLASH_5720VENDOR_A_ST_M25PE80:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14941) case FLASH_5720VENDOR_A_ST_M45PE80:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14942) case FLASH_5720VENDOR_ST_25USPT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14943) case FLASH_5720VENDOR_ST_45USPT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14944) tp->nvram_jedecnum = JEDEC_ST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14945) tg3_flag_set(tp, NVRAM_BUFFERED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14946) tg3_flag_set(tp, FLASH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14948) switch (nvmpinstrp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14949) case FLASH_5720VENDOR_M_ST_M25PE20:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14950) case FLASH_5720VENDOR_M_ST_M45PE20:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14951) case FLASH_5720VENDOR_A_ST_M25PE20:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14952) case FLASH_5720VENDOR_A_ST_M45PE20:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14953) tp->nvram_size = TG3_NVRAM_SIZE_256KB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14954) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14955) case FLASH_5720VENDOR_M_ST_M25PE40:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14956) case FLASH_5720VENDOR_M_ST_M45PE40:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14957) case FLASH_5720VENDOR_A_ST_M25PE40:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14958) case FLASH_5720VENDOR_A_ST_M45PE40:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14959) tp->nvram_size = TG3_NVRAM_SIZE_512KB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14960) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14961) case FLASH_5720VENDOR_M_ST_M25PE80:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14962) case FLASH_5720VENDOR_M_ST_M45PE80:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14963) case FLASH_5720VENDOR_A_ST_M25PE80:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14964) case FLASH_5720VENDOR_A_ST_M45PE80:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14965) tp->nvram_size = TG3_NVRAM_SIZE_1MB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14966) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14967) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14968) if (tg3_asic_rev(tp) != ASIC_REV_5762)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14969) tp->nvram_size = TG3_NVRAM_SIZE_128KB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14970) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14971) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14972) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14973) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14974) tg3_flag_set(tp, NO_NVRAM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14975) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14976) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14978) tg3_nvram_get_pagesize(tp, nvcfg1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14979) if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14980) tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14982) if (tg3_asic_rev(tp) == ASIC_REV_5762) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14983) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14985) if (tg3_nvram_read(tp, 0, &val))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14986) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14987)
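		/* Sanity-check the part: word 0 must carry either the raw
		 * EEPROM magic or the firmware-image magic, presumably to
		 * catch a strap with no actual device behind it.
		 */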
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14988) if (val != TG3_EEPROM_MAGIC &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14989) (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14990) tg3_flag_set(tp, NO_NVRAM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14991) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14992) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14993)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14994) /* Chips other than 5700/5701 use the NVRAM for fetching info. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14995) static void tg3_nvram_init(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14996) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14997) if (tg3_flag(tp, IS_SSB_CORE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14998) /* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14999) tg3_flag_clear(tp, NVRAM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15000) tg3_flag_clear(tp, NVRAM_BUFFERED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15001) tg3_flag_set(tp, NO_NVRAM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15002) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15003) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15004)
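	/* Reset the EEPROM finite state machine and program the default
	 * clock period; the 1 ms sleep below gives the reset time to
	 * complete before accesses are enabled.
	 */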
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15005) tw32_f(GRC_EEPROM_ADDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15006) (EEPROM_ADDR_FSM_RESET |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15007) (EEPROM_DEFAULT_CLOCK_PERIOD <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15008) EEPROM_ADDR_CLKPERD_SHIFT)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15009)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15010) msleep(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15011)
	/* Enable serial EEPROM (SEEPROM) accesses. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15013) tw32_f(GRC_LOCAL_CTRL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15014) tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15015) udelay(100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15017) if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15018) tg3_asic_rev(tp) != ASIC_REV_5701) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15019) tg3_flag_set(tp, NVRAM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15021) if (tg3_nvram_lock(tp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15022) netdev_warn(tp->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15023) "Cannot get nvram lock, %s failed\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15024) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15025) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15026) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15027) tg3_enable_nvram_access(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15028)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15029) tp->nvram_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15031) if (tg3_asic_rev(tp) == ASIC_REV_5752)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15032) tg3_get_5752_nvram_info(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15033) else if (tg3_asic_rev(tp) == ASIC_REV_5755)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15034) tg3_get_5755_nvram_info(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15035) else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15036) tg3_asic_rev(tp) == ASIC_REV_5784 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15037) tg3_asic_rev(tp) == ASIC_REV_5785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15038) tg3_get_5787_nvram_info(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15039) else if (tg3_asic_rev(tp) == ASIC_REV_5761)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15040) tg3_get_5761_nvram_info(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15041) else if (tg3_asic_rev(tp) == ASIC_REV_5906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15042) tg3_get_5906_nvram_info(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15043) else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15044) tg3_flag(tp, 57765_CLASS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15045) tg3_get_57780_nvram_info(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15046) else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15047) tg3_asic_rev(tp) == ASIC_REV_5719)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15048) tg3_get_5717_nvram_info(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15049) else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15050) tg3_asic_rev(tp) == ASIC_REV_5762)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15051) tg3_get_5720_nvram_info(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15052) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15053) tg3_get_nvram_info(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15054)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15055) if (tp->nvram_size == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15056) tg3_get_nvram_size(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15057)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15058) tg3_disable_nvram_access(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15059) tg3_nvram_unlock(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15061) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15062) tg3_flag_clear(tp, NVRAM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15063) tg3_flag_clear(tp, NVRAM_BUFFERED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15065) tg3_get_eeprom_size(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15066) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15067) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15068)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15069) struct subsys_tbl_ent {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15070) u16 subsys_vendor, subsys_devid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15071) u32 phy_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15072) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15073)
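/* Static override table for early boards whose NVRAM does not carry a
 * usable PHY ID.  A phy_id of 0 appears to mark fiber/serdes boards
 * (e.g. the 3C996SX) that have no copper PHY to identify.
 */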
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15074) static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15075) /* Broadcom boards. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15076) { TG3PCI_SUBVENDOR_ID_BROADCOM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15077) TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15078) { TG3PCI_SUBVENDOR_ID_BROADCOM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15079) TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15080) { TG3PCI_SUBVENDOR_ID_BROADCOM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15081) TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15082) { TG3PCI_SUBVENDOR_ID_BROADCOM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15083) TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15084) { TG3PCI_SUBVENDOR_ID_BROADCOM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15085) TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15086) { TG3PCI_SUBVENDOR_ID_BROADCOM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15087) TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15088) { TG3PCI_SUBVENDOR_ID_BROADCOM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15089) TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15090) { TG3PCI_SUBVENDOR_ID_BROADCOM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15091) TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15092) { TG3PCI_SUBVENDOR_ID_BROADCOM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15093) TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15094) { TG3PCI_SUBVENDOR_ID_BROADCOM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15095) TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15096) { TG3PCI_SUBVENDOR_ID_BROADCOM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15097) TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15098)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15099) /* 3com boards. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15100) { TG3PCI_SUBVENDOR_ID_3COM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15101) TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15102) { TG3PCI_SUBVENDOR_ID_3COM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15103) TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15104) { TG3PCI_SUBVENDOR_ID_3COM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15105) TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15106) { TG3PCI_SUBVENDOR_ID_3COM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15107) TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15108) { TG3PCI_SUBVENDOR_ID_3COM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15109) TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15111) /* DELL boards. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15112) { TG3PCI_SUBVENDOR_ID_DELL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15113) TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15114) { TG3PCI_SUBVENDOR_ID_DELL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15115) TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15116) { TG3PCI_SUBVENDOR_ID_DELL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15117) TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15118) { TG3PCI_SUBVENDOR_ID_DELL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15119) TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15121) /* Compaq boards. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15122) { TG3PCI_SUBVENDOR_ID_COMPAQ,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15123) TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15124) { TG3PCI_SUBVENDOR_ID_COMPAQ,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15125) TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15126) { TG3PCI_SUBVENDOR_ID_COMPAQ,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15127) TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15128) { TG3PCI_SUBVENDOR_ID_COMPAQ,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15129) TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15130) { TG3PCI_SUBVENDOR_ID_COMPAQ,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15131) TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15133) /* IBM boards. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15134) { TG3PCI_SUBVENDOR_ID_IBM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15135) TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15136) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15138) static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15139) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15140) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15142) for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15143) if ((subsys_id_to_phy_id[i].subsys_vendor ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15144) tp->pdev->subsystem_vendor) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15145) (subsys_id_to_phy_id[i].subsys_devid ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15146) tp->pdev->subsystem_device))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15147) return &subsys_id_to_phy_id[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15148) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15149) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15150) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15152) static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15153) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15154) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15156) tp->phy_id = TG3_PHY_ID_INVALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15157) tp->led_ctrl = LED_CTRL_MODE_PHY_1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15158)
	/* Assume an onboard, WOL-capable device by default. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15160) tg3_flag_set(tp, EEPROM_WRITE_PROT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15161) tg3_flag_set(tp, WOL_CAP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15162)
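	/* The 5906 keeps its configuration in the VCPU shadow register
	 * rather than in NIC SRAM, so decode the LOM and WOL state here
	 * and skip the SRAM parsing below.
	 */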
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15163) if (tg3_asic_rev(tp) == ASIC_REV_5906) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15164) if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15165) tg3_flag_clear(tp, EEPROM_WRITE_PROT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15166) tg3_flag_set(tp, IS_NIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15167) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15168) val = tr32(VCPU_CFGSHDW);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15169) if (val & VCPU_CFGSHDW_ASPM_DBNC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15170) tg3_flag_set(tp, ASPM_WORKAROUND);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15171) if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15172) (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15173) tg3_flag_set(tp, WOL_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15174) device_set_wakeup_enable(&tp->pdev->dev, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15175) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15176) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15177) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15179) tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15180) if (val == NIC_SRAM_DATA_SIG_MAGIC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15181) u32 nic_cfg, led_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15182) u32 cfg2 = 0, cfg4 = 0, cfg5 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15183) u32 nic_phy_id, ver, eeprom_phy_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15184) int eeprom_phy_serdes = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15186) tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15187) tp->nic_sram_data_cfg = nic_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15189) tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15190) ver >>= NIC_SRAM_DATA_VER_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15191) if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15192) tg3_asic_rev(tp) != ASIC_REV_5701 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15193) tg3_asic_rev(tp) != ASIC_REV_5703 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15194) (ver > 0) && (ver < 0x100))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15195) tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15197) if (tg3_asic_rev(tp) == ASIC_REV_5785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15198) tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15199)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15200) if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15201) tg3_asic_rev(tp) == ASIC_REV_5719 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15202) tg3_asic_rev(tp) == ASIC_REV_5720)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15203) tg3_read_mem(tp, NIC_SRAM_DATA_CFG_5, &cfg5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15204)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15205) if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15206) NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15207) eeprom_phy_serdes = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15208)
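		/* Reassemble the PHY ID from its two SRAM halves into
		 * the same layout produced from MII_PHYSID1/2 in
		 * tg3_phy_probe().
		 */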
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15209) tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15210) if (nic_phy_id != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15211) u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15212) u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15213)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15214) eeprom_phy_id = (id1 >> 16) << 10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15215) eeprom_phy_id |= (id2 & 0xfc00) << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15216) eeprom_phy_id |= (id2 & 0x03ff) << 0;
		} else {
			eeprom_phy_id = 0;
		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15220) tp->phy_id = eeprom_phy_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15221) if (eeprom_phy_serdes) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15222) if (!tg3_flag(tp, 5705_PLUS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15223) tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15224) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15225) tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15226) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15228) if (tg3_flag(tp, 5750_PLUS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15229) led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15230) SHASTA_EXT_LED_MODE_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15231) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15232) led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15233)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15234) switch (led_cfg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15235) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15236) case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15237) tp->led_ctrl = LED_CTRL_MODE_PHY_1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15238) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15239)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15240) case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15241) tp->led_ctrl = LED_CTRL_MODE_PHY_2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15242) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15243)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15244) case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15245) tp->led_ctrl = LED_CTRL_MODE_MAC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15246)
			/* Some older 5700/5701 bootcode reports 0
			 * (MAC_MODE) here; default to PHY_1_MODE in
			 * that case.
			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15250) if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15251) tg3_asic_rev(tp) == ASIC_REV_5701)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15252) tp->led_ctrl = LED_CTRL_MODE_PHY_1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15254) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15256) case SHASTA_EXT_LED_SHARED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15257) tp->led_ctrl = LED_CTRL_MODE_SHARED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15258) if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15259) tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15260) tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15261) LED_CTRL_MODE_PHY_2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15263) if (tg3_flag(tp, 5717_PLUS) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15264) tg3_asic_rev(tp) == ASIC_REV_5762)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15265) tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15266) LED_CTRL_BLINK_RATE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15267)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15268) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15269)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15270) case SHASTA_EXT_LED_MAC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15271) tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15272) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15274) case SHASTA_EXT_LED_COMBO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15275) tp->led_ctrl = LED_CTRL_MODE_COMBO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15276) if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15277) tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15278) LED_CTRL_MODE_PHY_2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15279) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15281) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15282)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15283) if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15284) tg3_asic_rev(tp) == ASIC_REV_5701) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15285) tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15286) tp->led_ctrl = LED_CTRL_MODE_PHY_2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15288) if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15289) tp->led_ctrl = LED_CTRL_MODE_PHY_1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15291) if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15292) tg3_flag_set(tp, EEPROM_WRITE_PROT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15293) if ((tp->pdev->subsystem_vendor ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15294) PCI_VENDOR_ID_ARIMA) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15295) (tp->pdev->subsystem_device == 0x205a ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15296) tp->pdev->subsystem_device == 0x2063))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15297) tg3_flag_clear(tp, EEPROM_WRITE_PROT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15298) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15299) tg3_flag_clear(tp, EEPROM_WRITE_PROT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15300) tg3_flag_set(tp, IS_NIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15301) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15302)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15303) if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15304) tg3_flag_set(tp, ENABLE_ASF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15305) if (tg3_flag(tp, 5750_PLUS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15306) tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15307) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15309) if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15310) tg3_flag(tp, 5750_PLUS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15311) tg3_flag_set(tp, ENABLE_APE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15312)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15313) if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15314) !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15315) tg3_flag_clear(tp, WOL_CAP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15317) if (tg3_flag(tp, WOL_CAP) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15318) (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15319) tg3_flag_set(tp, WOL_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15320) device_set_wakeup_enable(&tp->pdev->dev, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15321) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15322)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15323) if (cfg2 & (1 << 17))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15324) tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15325)
		/* SerDes signal pre-emphasis in register 0x590 is set
		 * by the bootcode if bit 18 is set.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15328) if (cfg2 & (1 << 18))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15329) tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15331) if ((tg3_flag(tp, 57765_PLUS) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15332) (tg3_asic_rev(tp) == ASIC_REV_5784 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15333) tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15334) (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15335) tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15336)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15337) if (tg3_flag(tp, PCI_EXPRESS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15338) u32 cfg3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15339)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15340) tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15341) if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15342) !tg3_flag(tp, 57765_PLUS) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15343) (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15344) tg3_flag_set(tp, ASPM_WORKAROUND);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15345) if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15346) tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15347) if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15348) tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15349) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15350)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15351) if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15352) tg3_flag_set(tp, RGMII_INBAND_DISABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15353) if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15354) tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15355) if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15356) tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15357)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15358) if (cfg5 & NIC_SRAM_DISABLE_1G_HALF_ADV)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15359) tp->phy_flags |= TG3_PHYFLG_DISABLE_1G_HD_ADV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15360) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15361) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15362) if (tg3_flag(tp, WOL_CAP))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15363) device_set_wakeup_enable(&tp->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15364) tg3_flag(tp, WOL_ENABLE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15365) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15366) device_set_wakeup_capable(&tp->pdev->dev, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15367) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15368)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15369) static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15370) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15371) int i, err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15372) u32 val2, off = offset * 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15374) err = tg3_nvram_lock(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15375) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15376) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15377)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15378) tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15379) tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15380) APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15381) tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15382) udelay(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15383)
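	/* Poll for command completion for up to 1 ms (100 x 10 us). */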
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15384) for (i = 0; i < 100; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15385) val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15386) if (val2 & APE_OTP_STATUS_CMD_DONE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15387) *val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15388) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15389) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15390) udelay(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15391) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15392)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15393) tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15394)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15395) tg3_nvram_unlock(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15396) if (val2 & APE_OTP_STATUS_CMD_DONE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15397) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15398)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15399) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15400) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15401)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15402) static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15403) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15404) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15405) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15406)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15407) tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15408) tw32(OTP_CTRL, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15409)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15410) /* Wait for up to 1 ms for command to execute. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15411) for (i = 0; i < 100; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15412) val = tr32(OTP_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15413) if (val & OTP_STATUS_CMD_DONE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15414) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15415) udelay(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15416) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15418) return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15419) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15420)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15421) /* Read the gphy configuration from the OTP region of the chip. The gphy
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15422) * configuration is a 32-bit value that straddles the alignment boundary.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15423) * We do two 32-bit reads and then shift and merge the results.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15424) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15425) static u32 tg3_read_otp_phycfg(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15426) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15427) u32 bhalf_otp, thalf_otp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15428)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15429) tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15430)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15431) if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15432) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15434) tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15435)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15436) if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15437) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15438)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15439) thalf_otp = tr32(OTP_READ_DATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15440)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15441) tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15442)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15443) if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15444) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15445)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15446) bhalf_otp = tr32(OTP_READ_DATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15447)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15448) return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15449) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15450)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15451) static void tg3_phy_init_link_config(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15452) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15453) u32 adv = ADVERTISED_Autoneg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15454)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15455) if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15456) if (!(tp->phy_flags & TG3_PHYFLG_DISABLE_1G_HD_ADV))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15457) adv |= ADVERTISED_1000baseT_Half;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15458) adv |= ADVERTISED_1000baseT_Full;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15459) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15460)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15461) if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15462) adv |= ADVERTISED_100baseT_Half |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15463) ADVERTISED_100baseT_Full |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15464) ADVERTISED_10baseT_Half |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15465) ADVERTISED_10baseT_Full |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15466) ADVERTISED_TP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15467) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15468) adv |= ADVERTISED_FIBRE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15470) tp->link_config.advertising = adv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15471) tp->link_config.speed = SPEED_UNKNOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15472) tp->link_config.duplex = DUPLEX_UNKNOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15473) tp->link_config.autoneg = AUTONEG_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15474) tp->link_config.active_speed = SPEED_UNKNOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15475) tp->link_config.active_duplex = DUPLEX_UNKNOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15476)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15477) tp->old_link = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15478) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15479)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15480) static int tg3_phy_probe(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15481) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15482) u32 hw_phy_id_1, hw_phy_id_2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15483) u32 hw_phy_id, hw_phy_id_masked;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15484) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15485)
	/* Flow control autonegotiation is the default behavior. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15487) tg3_flag_set(tp, PAUSE_AUTONEG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15488) tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15489)
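	/* Each PCI function arbitrates PHY access through its own
	 * APE lock.
	 */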
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15490) if (tg3_flag(tp, ENABLE_APE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15491) switch (tp->pci_fn) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15492) case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15493) tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15494) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15495) case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15496) tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15497) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15498) case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15499) tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15500) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15501) case 3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15502) tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15503) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15504) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15505) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15506)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15507) if (!tg3_flag(tp, ENABLE_ASF) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15508) !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15509) !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15510) tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15511) TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15512)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15513) if (tg3_flag(tp, USE_PHYLIB))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15514) return tg3_phy_init(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15515)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15516) /* Reading the PHY ID register can conflict with ASF
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15517) * firmware access to the PHY hardware.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15518) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15519) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15520) if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15521) hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15522) } else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall
		 * back to the PHY ID recorded in the EEPROM area and,
		 * failing that, to the hard-coded subsystem ID table.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15528) err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15529) err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15530)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15531) hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15532) hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15533) hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15535) hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15536) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15537)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15538) if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15539) tp->phy_id = hw_phy_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15540) if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15541) tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15542) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15543) tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15544) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15545) if (tp->phy_id != TG3_PHY_ID_INVALID) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15546) /* Do nothing, phy ID already set up in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15547) * tg3_get_eeprom_hw_cfg().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15548) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15549) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15550) struct subsys_tbl_ent *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15551)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15552) /* No eeprom signature? Try the hardcoded
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15553) * subsys device table.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15554) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15555) p = tg3_lookup_by_subsys(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15556) if (p) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15557) tp->phy_id = p->phy_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15558) } else if (!tg3_flag(tp, IS_SSB_CORE)) {
				/* So far we have seen the IDs 0xbc050cd0,
				 * 0xbc050f80 and 0xbc050c30 on devices
				 * connected to a BCM4785, and there are
				 * probably more.  For now, just assume
				 * that the PHY is supported when it is
				 * connected to an SSB core.
				 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15566) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15567) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15568)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15569) if (!tp->phy_id ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15570) tp->phy_id == TG3_PHY_ID_BCM8002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15571) tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15572) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15573) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15574)
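	/* Mark EEE support only on copper devices of the newer ASICs;
	 * the A0 steppings of the 5717 and 57765 are excluded.
	 */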
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15575) if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15576) (tg3_asic_rev(tp) == ASIC_REV_5719 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15577) tg3_asic_rev(tp) == ASIC_REV_5720 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15578) tg3_asic_rev(tp) == ASIC_REV_57766 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15579) tg3_asic_rev(tp) == ASIC_REV_5762 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15580) (tg3_asic_rev(tp) == ASIC_REV_5717 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15581) tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15582) (tg3_asic_rev(tp) == ASIC_REV_57765 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15583) tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15584) tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15585)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15586) tp->eee.supported = SUPPORTED_100baseT_Full |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15587) SUPPORTED_1000baseT_Full;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15588) tp->eee.advertised = ADVERTISED_100baseT_Full |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15589) ADVERTISED_1000baseT_Full;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15590) tp->eee.eee_enabled = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15591) tp->eee.tx_lpi_enabled = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15592) tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15593) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15594)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15595) tg3_phy_init_link_config(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15596)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15597) if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15598) !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15599) !tg3_flag(tp, ENABLE_APE) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15600) !tg3_flag(tp, ENABLE_ASF)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15601) u32 bmsr, dummy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15602)
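		/* BMSR latches link-down events, so read it twice: the
		 * second read reflects the current link state.
		 */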
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15603) tg3_readphy(tp, MII_BMSR, &bmsr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15604) if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15605) (bmsr & BMSR_LSTATUS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15606) goto skip_phy_reset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15607)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15608) err = tg3_phy_reset(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15609) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15610) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15611)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15612) tg3_phy_set_wirespeed(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15613)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15614) if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15615) tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15616) tp->link_config.flowctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15617)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15618) tg3_writephy(tp, MII_BMCR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15619) BMCR_ANENABLE | BMCR_ANRESTART);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15620) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15621) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15622)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15623) skip_phy_reset:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15624) if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15625) err = tg3_init_5401phy_dsp(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15626) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15627) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15628)
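		/* The DSP init sequence is intentionally issued twice;
		 * presumably the first pass does not always take on
		 * 5401 parts.
		 */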
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15629) err = tg3_init_5401phy_dsp(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15630) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15631)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15632) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15633) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15634)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15635) static void tg3_read_vpd(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15636) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15637) u8 *vpd_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15638) unsigned int block_end, rosize, len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15639) u32 vpdlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15640) int j, i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15641)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15642) vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15643) if (!vpd_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15644) goto out_no_vpd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15645)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15646) i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15647) if (i < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15648) goto out_not_found;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15649)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15650) rosize = pci_vpd_lrdt_size(&vpd_data[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15651) block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15652) i += PCI_VPD_LRDT_TAG_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15653)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15654) if (block_end > vpdlen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15655) goto out_not_found;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15656)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15657) j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15658) PCI_VPD_RO_KEYWORD_MFR_ID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15659) if (j > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15660) len = pci_vpd_info_field_size(&vpd_data[j]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15661)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15662) j += PCI_VPD_INFO_FLD_HDR_SIZE;
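		/* "1028" is Dell's PCI vendor ID (0x1028) rendered in
		 * ASCII; boards from other vendors skip straight to
		 * the part-number lookup.
		 */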
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15663) if (j + len > block_end || len != 4 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15664) memcmp(&vpd_data[j], "1028", 4))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15665) goto partno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15666)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15667) j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15668) PCI_VPD_RO_KEYWORD_VENDOR0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15669) if (j < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15670) goto partno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15671)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15672) len = pci_vpd_info_field_size(&vpd_data[j]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15673)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15674) j += PCI_VPD_INFO_FLD_HDR_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15675) if (j + len > block_end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15676) goto partno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15677)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15678) if (len >= sizeof(tp->fw_ver))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15679) len = sizeof(tp->fw_ver) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15680) memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15681) snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15682) &vpd_data[j]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15683) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15684)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15685) partno:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15686) i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15687) PCI_VPD_RO_KEYWORD_PARTNO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15688) if (i < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15689) goto out_not_found;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15690)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15691) len = pci_vpd_info_field_size(&vpd_data[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15692)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15693) i += PCI_VPD_INFO_FLD_HDR_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15694) if (len > TG3_BPN_SIZE ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15695) (len + i) > vpdlen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15696) goto out_not_found;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15697)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15698) memcpy(tp->board_part_number, &vpd_data[i], len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15699)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15700) out_not_found:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15701) kfree(vpd_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15702) if (tp->board_part_number[0])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15703) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15704)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15705) out_no_vpd:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15706) if (tg3_asic_rev(tp) == ASIC_REV_5717) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15707) if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15708) tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15709) strcpy(tp->board_part_number, "BCM5717");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15710) else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15711) strcpy(tp->board_part_number, "BCM5718");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15712) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15713) goto nomatch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15714) } else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15715) if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15716) strcpy(tp->board_part_number, "BCM57780");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15717) else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15718) strcpy(tp->board_part_number, "BCM57760");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15719) else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15720) strcpy(tp->board_part_number, "BCM57790");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15721) else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15722) strcpy(tp->board_part_number, "BCM57788");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15723) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15724) goto nomatch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15725) } else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15726) if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15727) strcpy(tp->board_part_number, "BCM57761");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15728) else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15729) strcpy(tp->board_part_number, "BCM57765");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15730) else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15731) strcpy(tp->board_part_number, "BCM57781");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15732) else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15733) strcpy(tp->board_part_number, "BCM57785");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15734) else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15735) strcpy(tp->board_part_number, "BCM57791");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15736) else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15737) strcpy(tp->board_part_number, "BCM57795");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15738) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15739) goto nomatch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15740) } else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15741) if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15742) strcpy(tp->board_part_number, "BCM57762");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15743) else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15744) strcpy(tp->board_part_number, "BCM57766");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15745) else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15746) strcpy(tp->board_part_number, "BCM57782");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15747) else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15748) strcpy(tp->board_part_number, "BCM57786");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15749) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15750) goto nomatch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15751) } else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15752) strcpy(tp->board_part_number, "BCM95906");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15753) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15754) nomatch:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15755) strcpy(tp->board_part_number, "none");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15756) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15757) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15758)
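/* A firmware image is considered valid when its first word carries the
 * 0x0c000000 signature (after masking) and its second word is zero.
 */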
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15759) static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15760) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15761) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15762)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15763) if (tg3_nvram_read(tp, offset, &val) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15764) (val & 0xfc000000) != 0x0c000000 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15765) tg3_nvram_read(tp, offset + 4, &val) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15766) val != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15767) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15768)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15769) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15770) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15771)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15772) static void tg3_read_bc_ver(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15773) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15774) u32 val, offset, start, ver_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15775) int i, dst_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15776) bool newver = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15777)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15778) if (tg3_nvram_read(tp, 0xc, &offset) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15779) tg3_nvram_read(tp, 0x4, &start))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15780) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15781)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15782) offset = tg3_nvram_logical_addr(tp, offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15783)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15784) if (tg3_nvram_read(tp, offset, &val))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15785) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15786)
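	/* Same signature test as tg3_fw_img_is_valid(): images that
	 * pass it store a pointer to the version string in the third
	 * word of the header.
	 */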
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15787) if ((val & 0xfc000000) == 0x0c000000) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15788) if (tg3_nvram_read(tp, offset + 4, &val))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15789) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15790)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15791) if (val == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15792) newver = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15793) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15794)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15795) dst_off = strlen(tp->fw_ver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15797) if (newver) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15798) if (TG3_VER_SIZE - dst_off < 16 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15799) tg3_nvram_read(tp, offset + 8, &ver_offset))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15800) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15801)
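		/* ver_offset appears to be relative to the image load
		 * address in start; rebase it onto the image's NVRAM
		 * offset.
		 */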
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15802) offset = offset + ver_offset - start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15803) for (i = 0; i < 16; i += 4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15804) __be32 v;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15805) if (tg3_nvram_read_be32(tp, offset + i, &v))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15806) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15807)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15808) memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15809) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15810) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15811) u32 major, minor;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15812)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15813) if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15814) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15815)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15816) major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15817) TG3_NVM_BCVER_MAJSFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15818) minor = ver_offset & TG3_NVM_BCVER_MINMSK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15819) snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15820) "v%d.%02d", major, minor);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15821) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15822) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15824) static void tg3_read_hwsb_ver(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15825) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15826) u32 val, major, minor;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15828) /* Use native endian representation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15829) if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15830) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15832) major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15833) TG3_NVM_HWSB_CFG1_MAJSFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15834) minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15835) TG3_NVM_HWSB_CFG1_MINSFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15836)
	snprintf(&tp->fw_ver[0], TG3_VER_SIZE, "sb v%d.%02d", major, minor);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15838) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15840) static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15841) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15842) u32 offset, major, minor, build;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15844) strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15846) if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15847) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15848)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15849) switch (val & TG3_EEPROM_SB_REVISION_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15850) case TG3_EEPROM_SB_REVISION_0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15851) offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15852) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15853) case TG3_EEPROM_SB_REVISION_2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15854) offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15855) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15856) case TG3_EEPROM_SB_REVISION_3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15857) offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15858) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15859) case TG3_EEPROM_SB_REVISION_4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15860) offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15861) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15862) case TG3_EEPROM_SB_REVISION_5:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15863) offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15864) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15865) case TG3_EEPROM_SB_REVISION_6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15866) offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15867) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15868) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15869) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15870) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15872) if (tg3_nvram_read(tp, offset, &val))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15873) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15875) build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15876) TG3_EEPROM_SB_EDH_BLD_SHFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15877) major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15878) TG3_EEPROM_SB_EDH_MAJ_SHFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15879) minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15881) if (minor > 99 || build > 26)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15882) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15884) offset = strlen(tp->fw_ver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15885) snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15886) " v%d.%02d", major, minor);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15887)
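	/* Encode a non-zero build number as a trailing letter,
	 * 1 -> 'a' through 26 -> 'z'.
	 */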
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15888) if (build > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15889) offset = strlen(tp->fw_ver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15890) if (offset < TG3_VER_SIZE - 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15891) tp->fw_ver[offset] = 'a' + build - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15892) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15893) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15895) static void tg3_read_mgmtfw_ver(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15896) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15897) u32 val, offset, start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15898) int i, vlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15899)
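	/* Scan the NVRAM directory for the ASF init-code entry. */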
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15900) for (offset = TG3_NVM_DIR_START;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15901) offset < TG3_NVM_DIR_END;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15902) offset += TG3_NVM_DIRENT_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15903) if (tg3_nvram_read(tp, offset, &val))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15904) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15906) if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15907) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15908) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15909)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15910) if (offset == TG3_NVM_DIR_END)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15911) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15913) if (!tg3_flag(tp, 5705_PLUS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15914) start = 0x08000000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15915) else if (tg3_nvram_read(tp, offset - 4, &start))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15916) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15918) if (tg3_nvram_read(tp, offset + 4, &offset) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15919) !tg3_fw_img_is_valid(tp, offset) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15920) tg3_nvram_read(tp, offset + 8, &val))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15921) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15923) offset += val - start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15925) vlen = strlen(tp->fw_ver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15926)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15927) tp->fw_ver[vlen++] = ',';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15928) tp->fw_ver[vlen++] = ' ';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15930) for (i = 0; i < 4; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15931) __be32 v;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15932) if (tg3_nvram_read_be32(tp, offset, &v))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15933) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15935) offset += sizeof(v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15936)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15937) if (vlen > TG3_VER_SIZE - sizeof(v)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15938) memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15939) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15940) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15942) memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15943) vlen += sizeof(v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15944) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15945) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15947) static void tg3_probe_ncsi(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15948) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15949) u32 apedata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15951) apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15952) if (apedata != APE_SEG_SIG_MAGIC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15953) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15955) apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15956) if (!(apedata & APE_FW_STATUS_READY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15957) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15959) if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15960) tg3_flag_set(tp, APE_HAS_NCSI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15961) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15963) static void tg3_read_dash_ver(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15964) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15965) int vlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15966) u32 apedata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15967) char *fwtype;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15969) apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15971) if (tg3_flag(tp, APE_HAS_NCSI))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15972) fwtype = "NCSI";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15973) else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15974) fwtype = "SMASH";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15975) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15976) fwtype = "DASH";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15978) vlen = strlen(tp->fw_ver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15980) snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15981) fwtype,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15982) (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15983) (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15984) (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15985) (apedata & APE_FW_VERSION_BLDMSK));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15986) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15987)
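/* On the 5762 a firmware version suffix lives in OTP rather than
 * NVRAM.  Two adjacent 32-bit OTP words are combined into a 64-bit
 * value and scanned a byte at a time; the last nonzero byte found
 * (at most seven bytes are examined) becomes the version number.
 */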
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15988) static void tg3_read_otp_ver(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15989) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15990) u32 val, val2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15992) if (tg3_asic_rev(tp) != ASIC_REV_5762)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15993) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15994)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15995) if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15996) !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15997) TG3_OTP_MAGIC0_VALID(val)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15998) u64 val64 = (u64) val << 32 | val2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15999) u32 ver = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16000) int i, vlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16001)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16002) for (i = 0; i < 7; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16003) if ((val64 & 0xff) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16004) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16005) ver = val64 & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16006) val64 >>= 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16007) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16008) vlen = strlen(tp->fw_ver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16009) snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16010) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16011) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16012)
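/* Top-level firmware version probe.  NVRAM word 0 acts as a format
 * discriminator: TG3_EEPROM_MAGIC selects the bootcode version
 * reader, while the FW and HW magic patterns select the selfboot
 * and hardware-selfboot readers.  NVRAM-less devices just get an
 * "sb" tag (plus an OTP-derived suffix on the 5762).
 */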
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16013) static void tg3_read_fw_ver(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16014) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16015) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16016) bool vpd_vers = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16018) if (tp->fw_ver[0] != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16019) vpd_vers = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16021) if (tg3_flag(tp, NO_NVRAM)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16022) strcat(tp->fw_ver, "sb");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16023) tg3_read_otp_ver(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16024) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16025) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16027) if (tg3_nvram_read(tp, 0, &val))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16028) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16029)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16030) if (val == TG3_EEPROM_MAGIC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16031) tg3_read_bc_ver(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16032) else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16033) tg3_read_sb_ver(tp, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16034) else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16035) tg3_read_hwsb_ver(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16036)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16037) if (tg3_flag(tp, ENABLE_ASF)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16038) if (tg3_flag(tp, ENABLE_APE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16039) tg3_probe_ncsi(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16040) if (!vpd_vers)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16041) tg3_read_dash_ver(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16042) } else if (!vpd_vers) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16043) tg3_read_mgmtfw_ver(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16044) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16045) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16047) tp->fw_ver[TG3_VER_SIZE - 1] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16048) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16049)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16050) static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16051) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16052) if (tg3_flag(tp, LRG_PROD_RING_CAP))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16053) return TG3_RX_RET_MAX_SIZE_5717;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16054) else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16055) return TG3_RX_RET_MAX_SIZE_5700;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16056) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16057) return TG3_RX_RET_MAX_SIZE_5705;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16058) }
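/* Host bridges known to reorder posted writes.  The table is consulted
 * through pci_dev_present() further down; when one of these chipsets
 * is present and the NIC is not PCIe, MBOX_WRITE_REORDER is set so
 * that every mailbox write is flushed with a read back.
 */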
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16060) static const struct pci_device_id tg3_write_reorder_chipsets[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16061) { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16062) { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16063) { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16064) { },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16065) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16066)
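/* Locate the other function of a dual-port device (5704/5714).  Both
 * ports share a single PCI device number, so scan the functions in
 * the same slot for a device other than ourselves.
 */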
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16067) static struct pci_dev *tg3_find_peer(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16068) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16069) struct pci_dev *peer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16070) unsigned int func, devnr = tp->pdev->devfn & ~7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16071)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16072) for (func = 0; func < 8; func++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16073) peer = pci_get_slot(tp->pdev->bus, devnr | func);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16074) if (peer && peer != tp->pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16075) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16076) pci_dev_put(peer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16077) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16078) /* The 5704 can be configured in single-port mode; set peer to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16079) * tp->pdev in that case.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16080) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16081) if (!peer) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16082) peer = tp->pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16083) return peer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16084) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16086) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16087) * We don't need to keep the refcount elevated; there's no way
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16088) * to remove one half of this device without removing the other.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16089) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16090) pci_dev_put(peer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16091)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16092) return peer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16093) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16094)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16095) static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16096) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16097) tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16098) if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16099) u32 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16101) /* All devices that use the alternate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16102) * ASIC REV location have a CPMU.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16103) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16104) tg3_flag_set(tp, CPMU_PRESENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16106) if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16107) tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16108) tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16109) tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16110) tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16111) tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16112) tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16113) tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16114) tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16115) tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16116) tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16117) reg = TG3PCI_GEN2_PRODID_ASICREV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16118) else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16119) tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16120) tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16121) tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16122) tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16123) tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16124) tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16125) tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16126) tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16127) tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16128) reg = TG3PCI_GEN15_PRODID_ASICREV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16129) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16130) reg = TG3PCI_PRODID_ASICREV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16132) pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16133) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16135) /* Wrong chip ID in 5752 A0. This code can be removed later
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16136) * as A0 is not in production.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16137) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16138) if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16139) tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16141) if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16142) tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16143)
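	/* The *_PLUS flags below form a nested hierarchy: 5717_PLUS and
	 * the 57765 class fold into 57765_PLUS, which folds into
	 * 5755_PLUS, then 5750_PLUS, then 5705_PLUS; setting an earlier
	 * flag implies all of the later ones.
	 */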
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16144) if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16145) tg3_asic_rev(tp) == ASIC_REV_5719 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16146) tg3_asic_rev(tp) == ASIC_REV_5720)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16147) tg3_flag_set(tp, 5717_PLUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16149) if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16150) tg3_asic_rev(tp) == ASIC_REV_57766)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16151) tg3_flag_set(tp, 57765_CLASS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16153) if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16154) tg3_asic_rev(tp) == ASIC_REV_5762)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16155) tg3_flag_set(tp, 57765_PLUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16157) /* Intentionally exclude ASIC_REV_5906 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16158) if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16159) tg3_asic_rev(tp) == ASIC_REV_5787 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16160) tg3_asic_rev(tp) == ASIC_REV_5784 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16161) tg3_asic_rev(tp) == ASIC_REV_5761 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16162) tg3_asic_rev(tp) == ASIC_REV_5785 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16163) tg3_asic_rev(tp) == ASIC_REV_57780 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16164) tg3_flag(tp, 57765_PLUS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16165) tg3_flag_set(tp, 5755_PLUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16167) if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16168) tg3_asic_rev(tp) == ASIC_REV_5714)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16169) tg3_flag_set(tp, 5780_CLASS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16170)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16171) if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16172) tg3_asic_rev(tp) == ASIC_REV_5752 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16173) tg3_asic_rev(tp) == ASIC_REV_5906 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16174) tg3_flag(tp, 5755_PLUS) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16175) tg3_flag(tp, 5780_CLASS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16176) tg3_flag_set(tp, 5750_PLUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16178) if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16179) tg3_flag(tp, 5750_PLUS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16180) tg3_flag_set(tp, 5705_PLUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16181) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16182)
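/* Decide whether this device is 10/100-only: certain 5703 board IDs,
 * any FET PHY, and table entries whose driver_data carries the
 * 10_100_ONLY flag (qualified on the 5705 by a second flag).
 */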
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16183) static bool tg3_10_100_only_device(struct tg3 *tp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16184) const struct pci_device_id *ent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16185) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16186) u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16188) if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16189) (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16190) (tp->phy_flags & TG3_PHYFLG_IS_FET))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16191) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16193) if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16194) if (tg3_asic_rev(tp) == ASIC_REV_5705) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16195) if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16196) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16197) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16198) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16199) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16200) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16202) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16203) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16204)
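/* One-time discovery of everything about the device that never changes
 * at runtime: chip revision, bus type (PCI/PCI-X/PCIe), DMA and
 * register access quirks, queue and TSO capabilities, and PHY
 * characteristics.  Runs from the PCI probe path before the netdev
 * is registered.
 */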
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16205) static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16206) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16207) u32 misc_ctrl_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16208) u32 pci_state_reg, grc_misc_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16209) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16210) u16 pci_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16211) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16213) /* Force memory write invalidate off. If we leave it on,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16214) * then on 5700_BX chips we have to enable a workaround.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16215) * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16216) * to match the cacheline size. The Broadcom driver has this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16217) * workaround but turns MWI off all the time and so never uses
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16218) * it. This seems to suggest that the workaround is insufficient.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16219) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16220) pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16221) pci_cmd &= ~PCI_COMMAND_INVALIDATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16222) pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16223)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16224) /* Important! -- Make sure register accesses are byteswapped
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16225) * correctly. Also, for those chips that require it, make
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16226) * sure that indirect register accesses are enabled before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16227) * the first operation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16228) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16229) pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16230) &misc_ctrl_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16231) tp->misc_host_ctrl |= (misc_ctrl_reg &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16232) MISC_HOST_CTRL_CHIPREV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16233) pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16234) tp->misc_host_ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16235)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16236) tg3_detect_asic_rev(tp, misc_ctrl_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16237)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16238) /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16239) * we need to disable memory and use config. cycles
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16240) * only to access all registers. The 5702/03 chips
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16241) * can mistakenly decode the special cycles from the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16242) * ICH chipsets as memory write cycles, causing corruption
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16243) * of register and memory space. Only certain ICH bridges
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16244) * will drive special cycles with non-zero data during the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16245) * address phase which can fall within the 5703's address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16246) * range. This is not an ICH bug as the PCI spec allows
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16247) * non-zero address during special cycles. However, only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16248) * these ICH bridges are known to drive non-zero addresses
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16249) * during special cycles.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16250) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16251) * Since special cycles do not cross PCI bridges, we only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16252) * enable this workaround if the 5703 is on the secondary
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16253) * bus of these ICH bridges.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16254) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16255) if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16256) (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16257) static struct tg3_dev_id {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16258) u32 vendor;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16259) u32 device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16260) u32 rev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16261) } ich_chipsets[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16262) { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16263) PCI_ANY_ID },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16264) { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16265) PCI_ANY_ID },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16266) { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16267) 0xa },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16268) { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16269) PCI_ANY_ID },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16270) { },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16271) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16272) struct tg3_dev_id *pci_id = &ich_chipsets[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16273) struct pci_dev *bridge = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16275) while (pci_id->vendor != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16276) bridge = pci_get_device(pci_id->vendor, pci_id->device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16277) bridge);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16278) if (!bridge) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16279) pci_id++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16280) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16281) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16282) if (pci_id->rev != PCI_ANY_ID) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16283) if (bridge->revision > pci_id->rev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16284) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16285) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16286) if (bridge->subordinate &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16287) (bridge->subordinate->number ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16288) tp->pdev->bus->number)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16289) tg3_flag_set(tp, ICH_WORKAROUND);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16290) pci_dev_put(bridge);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16291) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16292) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16293) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16294) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16296) if (tg3_asic_rev(tp) == ASIC_REV_5701) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16297) static struct tg3_dev_id {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16298) u32 vendor;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16299) u32 device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16300) } bridge_chipsets[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16301) { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16302) { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16303) { },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16304) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16305) struct tg3_dev_id *pci_id = &bridge_chipsets[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16306) struct pci_dev *bridge = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16307)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16308) while (pci_id->vendor != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16309) bridge = pci_get_device(pci_id->vendor,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16310) pci_id->device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16311) bridge);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16312) if (!bridge) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16313) pci_id++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16314) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16315) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16316) if (bridge->subordinate &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16317) (bridge->subordinate->number <=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16318) tp->pdev->bus->number) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16319) (bridge->subordinate->busn_res.end >=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16320) tp->pdev->bus->number)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16321) tg3_flag_set(tp, 5701_DMA_BUG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16322) pci_dev_put(bridge);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16323) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16324) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16325) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16326) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16327)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16328) /* The EPB bridge inside 5714, 5715, and 5780 cannot support
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16329) * DMA addresses > 40-bit. This bridge may have additional
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16330) * 57xx devices behind it, in some 4-port NIC designs for example.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16331) * Any tg3 device found behind the bridge will also need the 40-bit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16332) * DMA workaround.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16333) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16334) if (tg3_flag(tp, 5780_CLASS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16335) tg3_flag_set(tp, 40BIT_DMA_BUG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16336) tp->msi_cap = tp->pdev->msi_cap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16337) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16338) struct pci_dev *bridge = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16339)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16340) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16341) bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16342) PCI_DEVICE_ID_SERVERWORKS_EPB,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16343) bridge);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16344) if (bridge && bridge->subordinate &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16345) (bridge->subordinate->number <=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16346) tp->pdev->bus->number) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16347) (bridge->subordinate->busn_res.end >=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16348) tp->pdev->bus->number)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16349) tg3_flag_set(tp, 40BIT_DMA_BUG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16350) pci_dev_put(bridge);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16351) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16352) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16353) } while (bridge);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16354) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16355)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16356) if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16357) tg3_asic_rev(tp) == ASIC_REV_5714)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16358) tp->pdev_peer = tg3_find_peer(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16359)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16360) /* Determine TSO capabilities */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16361) if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16362) ; /* Do nothing. HW bug. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16363) else if (tg3_flag(tp, 57765_PLUS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16364) tg3_flag_set(tp, HW_TSO_3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16365) else if (tg3_flag(tp, 5755_PLUS) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16366) tg3_asic_rev(tp) == ASIC_REV_5906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16367) tg3_flag_set(tp, HW_TSO_2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16368) else if (tg3_flag(tp, 5750_PLUS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16369) tg3_flag_set(tp, HW_TSO_1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16370) tg3_flag_set(tp, TSO_BUG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16371) if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16372) tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16373) tg3_flag_clear(tp, TSO_BUG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16374) } else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16375) tg3_asic_rev(tp) != ASIC_REV_5701 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16376) tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16377) tg3_flag_set(tp, FW_TSO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16378) tg3_flag_set(tp, TSO_BUG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16379) if (tg3_asic_rev(tp) == ASIC_REV_5705)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16380) tp->fw_needed = FIRMWARE_TG3TSO5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16381) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16382) tp->fw_needed = FIRMWARE_TG3TSO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16383) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16384)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16385) /* Selectively allow TSO based on operating conditions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16386) if (tg3_flag(tp, HW_TSO_1) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16387) tg3_flag(tp, HW_TSO_2) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16388) tg3_flag(tp, HW_TSO_3) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16389) tg3_flag(tp, FW_TSO)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16390) /* For firmware TSO, assume ASF is disabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16391) * We'll disable TSO later if we discover ASF
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16392) * is enabled in tg3_get_eeprom_hw_cfg().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16393) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16394) tg3_flag_set(tp, TSO_CAPABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16395) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16396) tg3_flag_clear(tp, TSO_CAPABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16397) tg3_flag_clear(tp, TSO_BUG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16398) tp->fw_needed = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16399) }
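
	/* Net effect of the TSO setup above: 57765_PLUS parts get
	 * HW_TSO_3, 5755_PLUS and the 5906 get HW_TSO_2, the remaining
	 * 5750_PLUS parts get HW_TSO_1, and older chips fall back to
	 * firmware TSO, which needs a firmware image and is revoked
	 * later if ASF turns out to be enabled (see the
	 * tg3_get_eeprom_hw_cfg() handling below).
	 */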
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16401) if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16402) tp->fw_needed = FIRMWARE_TG3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16404) if (tg3_asic_rev(tp) == ASIC_REV_57766)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16405) tp->fw_needed = FIRMWARE_TG357766;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16406)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16407) tp->irq_max = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16409) if (tg3_flag(tp, 5750_PLUS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16410) tg3_flag_set(tp, SUPPORT_MSI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16411) if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16412) tg3_chip_rev(tp) == CHIPREV_5750_BX ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16413) (tg3_asic_rev(tp) == ASIC_REV_5714 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16414) tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16415) tp->pdev_peer == tp->pdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16416) tg3_flag_clear(tp, SUPPORT_MSI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16418) if (tg3_flag(tp, 5755_PLUS) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16419) tg3_asic_rev(tp) == ASIC_REV_5906) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16420) tg3_flag_set(tp, 1SHOT_MSI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16421) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16423) if (tg3_flag(tp, 57765_PLUS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16424) tg3_flag_set(tp, SUPPORT_MSIX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16425) tp->irq_max = TG3_IRQ_MAX_VECS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16426) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16427) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16428)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16429) tp->txq_max = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16430) tp->rxq_max = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16431) if (tp->irq_max > 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16432) tp->rxq_max = TG3_RSS_MAX_NUM_QS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16433) tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16434)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16435) if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16436) tg3_asic_rev(tp) == ASIC_REV_5720)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16437) tp->txq_max = tp->irq_max - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16438) }
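
	/* Queue limits above: with MSI-X the RX queue count follows the
	 * RSS limit, while only the 5719/5720 spread TX across multiple
	 * queues, capped at irq_max - 1 (presumably keeping one vector
	 * for the default ring).
	 */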
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16439)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16440) if (tg3_flag(tp, 5755_PLUS) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16441) tg3_asic_rev(tp) == ASIC_REV_5906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16442) tg3_flag_set(tp, SHORT_DMA_BUG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16443)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16444) if (tg3_asic_rev(tp) == ASIC_REV_5719)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16445) tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16446)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16447) if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16448) tg3_asic_rev(tp) == ASIC_REV_5719 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16449) tg3_asic_rev(tp) == ASIC_REV_5720 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16450) tg3_asic_rev(tp) == ASIC_REV_5762)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16451) tg3_flag_set(tp, LRG_PROD_RING_CAP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16452)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16453) if (tg3_flag(tp, 57765_PLUS) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16454) tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16455) tg3_flag_set(tp, USE_JUMBO_BDFLAG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16456)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16457) if (!tg3_flag(tp, 5705_PLUS) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16458) tg3_flag(tp, 5780_CLASS) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16459) tg3_flag(tp, USE_JUMBO_BDFLAG))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16460) tg3_flag_set(tp, JUMBO_CAPABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16461)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16462) pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16463) &pci_state_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16464)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16465) if (pci_is_pcie(tp->pdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16466) u16 lnkctl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16467)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16468) tg3_flag_set(tp, PCI_EXPRESS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16470) pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16471) if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16472) if (tg3_asic_rev(tp) == ASIC_REV_5906) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16473) tg3_flag_clear(tp, HW_TSO_2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16474) tg3_flag_clear(tp, TSO_CAPABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16475) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16476) if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16477) tg3_asic_rev(tp) == ASIC_REV_5761 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16478) tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16479) tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16480) tg3_flag_set(tp, CLKREQ_BUG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16481) } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16482) tg3_flag_set(tp, L1PLLPD_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16483) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16484) } else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16485) /* BCM5785 devices are effectively PCIe devices, and should
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16486) * follow PCIe codepaths, but do not have a PCIe capabilities
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16487) * section.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16488) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16489) tg3_flag_set(tp, PCI_EXPRESS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16490) } else if (!tg3_flag(tp, 5705_PLUS) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16491) tg3_flag(tp, 5780_CLASS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16492) tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16493) if (!tp->pcix_cap) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16494) dev_err(&tp->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16495) "Cannot find PCI-X capability, aborting\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16496) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16497) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16499) if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16500) tg3_flag_set(tp, PCIX_MODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16501) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16502)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16503) /* If we have an AMD 762 or VIA K8T800 chipset, reordering
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16504) * of writes to the mailbox registers done by the host
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16505) * controller can cause major trouble. We read back from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16506) * every mailbox register write to force the writes to be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16507) * posted to the chip in order.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16508) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16509) if (pci_dev_present(tg3_write_reorder_chipsets) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16510) !tg3_flag(tp, PCI_EXPRESS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16511) tg3_flag_set(tp, MBOX_WRITE_REORDER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16512)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16513) pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16514) &tp->pci_cacheline_sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16515) pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16516) &tp->pci_lat_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16517) if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16518) tp->pci_lat_timer < 64) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16519) tp->pci_lat_timer = 64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16520) pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16521) tp->pci_lat_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16522) }
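
	/* The latency timer counts in PCI clocks; the fixup above makes
	 * sure the 5703 gets at least 64, bumping any smaller
	 * preprogrammed value.
	 */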
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16523)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16524) /* Important! -- It is critical that the PCI-X hw workaround
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16525) * situation is decided before the first MMIO register access.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16526) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16527) if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16528) /* 5700 BX chips need to have their TX producer index
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16529) * mailboxes written twice to work around a bug.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16530) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16531) tg3_flag_set(tp, TXD_MBOX_HWBUG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16532)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16533) /* If we are in PCI-X mode, enable register write workaround.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16534) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16535) * The workaround is to use indirect register accesses
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16536) * for all chip writes not to mailbox registers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16537) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16538) if (tg3_flag(tp, PCIX_MODE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16539) u32 pm_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16540)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16541) tg3_flag_set(tp, PCIX_TARGET_HWBUG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16542)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16543) /* The chip can have its power management PCI config
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16544) * space registers clobbered due to this bug.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16545) * So explicitly force the chip into D0 here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16546) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16547) pci_read_config_dword(tp->pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16548) tp->pdev->pm_cap + PCI_PM_CTRL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16549) &pm_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16550) pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16551) pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16552) pci_write_config_dword(tp->pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16553) tp->pdev->pm_cap + PCI_PM_CTRL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16554) pm_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16555)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16556) /* Also, force SERR#/PERR# in PCI command. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16557) pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16558) pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16559) pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16560) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16561) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16562)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16563) if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16564) tg3_flag_set(tp, PCI_HIGH_SPEED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16565) if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16566) tg3_flag_set(tp, PCI_32BIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16567)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16568) /* Chip-specific fixup from Broadcom driver */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16569) if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16570) (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16571) pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16572) pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16573) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16574)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16575) /* Default fast path register access methods */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16576) tp->read32 = tg3_read32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16577) tp->write32 = tg3_write32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16578) tp->read32_mbox = tg3_read32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16579) tp->write32_mbox = tg3_write32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16580) tp->write32_tx_mbox = tg3_write32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16581) tp->write32_rx_mbox = tg3_write32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16582)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16583) /* Various workaround register access methods */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16584) if (tg3_flag(tp, PCIX_TARGET_HWBUG))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16585) tp->write32 = tg3_write_indirect_reg32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16586) else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16587) (tg3_flag(tp, PCI_EXPRESS) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16588) tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16589) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16590) * Back-to-back register writes can cause problems on these
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16591) * chips; the workaround is to read back all reg writes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16592) * except those to mailbox regs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16593) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16594) * See tg3_write_indirect_reg32().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16595) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16596) tp->write32 = tg3_write_flush_reg32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16597) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16598)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16599) if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16600) tp->write32_tx_mbox = tg3_write32_tx_mbox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16601) if (tg3_flag(tp, MBOX_WRITE_REORDER))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16602) tp->write32_rx_mbox = tg3_write_flush_reg32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16603) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16604)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16605) if (tg3_flag(tp, ICH_WORKAROUND)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16606) tp->read32 = tg3_read_indirect_reg32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16607) tp->write32 = tg3_write_indirect_reg32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16608) tp->read32_mbox = tg3_read_indirect_mbox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16609) tp->write32_mbox = tg3_write_indirect_mbox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16610) tp->write32_tx_mbox = tg3_write_indirect_mbox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16611) tp->write32_rx_mbox = tg3_write_indirect_mbox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16612)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16613) iounmap(tp->regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16614) tp->regs = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16615)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16616) pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16617) pci_cmd &= ~PCI_COMMAND_MEMORY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16618) pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16619) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16620) if (tg3_asic_rev(tp) == ASIC_REV_5906) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16621) tp->read32_mbox = tg3_read32_mbox_5906;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16622) tp->write32_mbox = tg3_write32_mbox_5906;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16623) tp->write32_tx_mbox = tg3_write32_mbox_5906;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16624) tp->write32_rx_mbox = tg3_write32_mbox_5906;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16625) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16626)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16627) if (tp->write32 == tg3_write_indirect_reg32 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16628) (tg3_flag(tp, PCIX_MODE) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16629) (tg3_asic_rev(tp) == ASIC_REV_5700 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16630) tg3_asic_rev(tp) == ASIC_REV_5701)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16631) tg3_flag_set(tp, SRAM_USE_CONFIG);
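
	/* With SRAM_USE_CONFIG set above, SRAM is reached through PCI
	 * config space cycles rather than plain MMIO, which the checks
	 * above deemed unsafe (indirect writes, or PCI-X mode on a
	 * 5700/5701).
	 */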
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16632)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16633) /* The memory arbiter has to be enabled in order for SRAM accesses
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16634) * to succeed. Normally on powerup the tg3 chip firmware will make
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16635) * sure it is enabled, but other entities such as system netboot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16636) * code might disable it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16637) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16638) val = tr32(MEMARB_MODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16639) tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16640)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16641) tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16642) if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16643) tg3_flag(tp, 5780_CLASS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16644) if (tg3_flag(tp, PCIX_MODE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16645) pci_read_config_dword(tp->pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16646) tp->pcix_cap + PCI_X_STATUS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16647) &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16648) tp->pci_fn = val & 0x7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16649) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16650) } else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16651) tg3_asic_rev(tp) == ASIC_REV_5719 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16652) tg3_asic_rev(tp) == ASIC_REV_5720) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16653) tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16654) if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16655) val = tr32(TG3_CPMU_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16656)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16657) if (tg3_asic_rev(tp) == ASIC_REV_5717)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16658) tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16659) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16660) tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16661) TG3_CPMU_STATUS_FSHFT_5719;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16662) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16663)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16664) if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16665) tp->write32_tx_mbox = tg3_write_flush_reg32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16666) tp->write32_rx_mbox = tg3_write_flush_reg32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16667) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16668)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16669) /* Get eeprom hw config before calling tg3_set_power_state().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16670) * In particular, the TG3_FLAG_IS_NIC flag must be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16671) * determined before calling tg3_set_power_state() so that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16672) * we know whether or not to switch out of Vaux power.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16673) * When the flag is set, it means that GPIO1 is used for eeprom
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16674) * write protect and also implies that it is a LOM where GPIOs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16675) * are not used to switch power.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16676) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16677) tg3_get_eeprom_hw_cfg(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16678)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16679) if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16680) tg3_flag_clear(tp, TSO_CAPABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16681) tg3_flag_clear(tp, TSO_BUG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16682) tp->fw_needed = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16683) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16684)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16685) if (tg3_flag(tp, ENABLE_APE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16686) /* Allow reads and writes to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16687) * APE register and memory space.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16688) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16689) pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16690) PCISTATE_ALLOW_APE_SHMEM_WR |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16691) PCISTATE_ALLOW_APE_PSPACE_WR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16692) pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16693) pci_state_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16694)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16695) tg3_ape_lock_init(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16696) tp->ape_hb_interval =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16697) msecs_to_jiffies(APE_HOST_HEARTBEAT_INT_5SEC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16698) }
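
	/* The ape_hb_interval set above is how often the driver refreshes
	 * its heartbeat in APE shared memory; the constant name suggests
	 * a 5 second period.
	 */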
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16699)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16700) /* Set up tp->grc_local_ctrl before calling
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16701) * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16702) * will bring 5700's external PHY out of reset.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16703) * It is also used as eeprom write protect on LOMs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16704) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16705) tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16706) if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16707) tg3_flag(tp, EEPROM_WRITE_PROT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16708) tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16709) GRC_LCLCTRL_GPIO_OUTPUT1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16710) /* Unused GPIO3 must be driven as output on 5752 because there
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16711) * are no pull-up resistors on unused GPIO pins.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16712) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16713) else if (tg3_asic_rev(tp) == ASIC_REV_5752)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16714) tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16715)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16716) if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16717) tg3_asic_rev(tp) == ASIC_REV_57780 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16718) tg3_flag(tp, 57765_CLASS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16719) tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16720)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16721) if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16722) tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16723) /* Turn off the debug UART. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16724) tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16725) if (tg3_flag(tp, IS_NIC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16726) /* Keep VMain power. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16727) tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16728) GRC_LCLCTRL_GPIO_OUTPUT0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16729) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16730)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16731) if (tg3_asic_rev(tp) == ASIC_REV_5762)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16732) tp->grc_local_ctrl |=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16733) tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16734)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16735) /* Switch out of Vaux if it is a NIC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16736) tg3_pwrsrc_switch_to_vmain(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16737)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16738) /* Derive the initial jumbo mode from the MTU assigned in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16739) * ether_setup() via the alloc_etherdev() call.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16740) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16741) if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16742) tg3_flag_set(tp, JUMBO_RING_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16743)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16744) /* Determine the Wake-on-LAN speed to use. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16745) if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16746) tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16747) tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16748) tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16749) tg3_flag_clear(tp, WOL_SPEED_100MB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16750) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16751) tg3_flag_set(tp, WOL_SPEED_100MB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16752) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16753)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16754) if (tg3_asic_rev(tp) == ASIC_REV_5906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16755) tp->phy_flags |= TG3_PHYFLG_IS_FET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16756)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16757) /* A few boards don't want the Ethernet@WireSpeed PHY feature */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16758) if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16759) (tg3_asic_rev(tp) == ASIC_REV_5705 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16760) (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16761) (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16762) (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16763) (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16764) tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16765)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16766) if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16767) tg3_chip_rev(tp) == CHIPREV_5704_AX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16768) tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16769) if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16770) tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16771)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16772) if (tg3_flag(tp, 5705_PLUS) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16773) !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16774) tg3_asic_rev(tp) != ASIC_REV_5785 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16775) tg3_asic_rev(tp) != ASIC_REV_57780 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16776) !tg3_flag(tp, 57765_PLUS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16777) if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16778) tg3_asic_rev(tp) == ASIC_REV_5787 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16779) tg3_asic_rev(tp) == ASIC_REV_5784 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16780) tg3_asic_rev(tp) == ASIC_REV_5761) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16781) if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16782) tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16783) tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16784) if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16785) tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16786) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16787) tp->phy_flags |= TG3_PHYFLG_BER_BUG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16788) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16789)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16790) if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16791) tg3_chip_rev(tp) != CHIPREV_5784_AX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16792) tp->phy_otp = tg3_read_otp_phycfg(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16793) if (tp->phy_otp == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16794) tp->phy_otp = TG3_OTP_DEFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16795) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16797) if (tg3_flag(tp, CPMU_PRESENT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16798) tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16799) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16800) tp->mi_mode = MAC_MI_MODE_BASE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16801)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16802) tp->coalesce_mode = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16803) if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16804) tg3_chip_rev(tp) != CHIPREV_5700_BX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16805) tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16806)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16807) /* Set these bits to enable the statistics workaround. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16808) if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16809) tg3_asic_rev(tp) == ASIC_REV_5762 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16810) tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16811) tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16812) tp->coalesce_mode |= HOSTCC_MODE_ATTN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16813) tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16814) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16815)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16816) if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16817) tg3_asic_rev(tp) == ASIC_REV_57780)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16818) tg3_flag_set(tp, USE_PHYLIB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16820) err = tg3_mdio_init(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16821) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16822) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16824) /* Initialize data/descriptor byte/word swapping. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16825) val = tr32(GRC_MODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16826) if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16827) tg3_asic_rev(tp) == ASIC_REV_5762)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16828) val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16829) GRC_MODE_WORD_SWAP_B2HRX_DATA |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16830) GRC_MODE_B2HRX_ENABLE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16831) GRC_MODE_HTX2B_ENABLE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16832) GRC_MODE_HOST_STACKUP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16833) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16834) val &= GRC_MODE_HOST_STACKUP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16835)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16836) tw32(GRC_MODE, val | tp->grc_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16837)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16838) tg3_switch_clocks(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16840) /* Clear this out for sanity. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16841) tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16842)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16843) /* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16844) tw32(TG3PCI_REG_BASE_ADDR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16846) pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16847) &pci_state_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16848) if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16849) !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16850) if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16851) tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16852) tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16853) tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16854) void __iomem *sram_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16856) /* Write some dummy words into the SRAM status block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16857) * area and see if they read back correctly. If the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16858) * read-back value is bad, force-enable the PCI-X workaround.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16859) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16860) sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16862) writel(0x00000000, sram_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16863) writel(0x00000000, sram_base + 4);
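^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16863) /* The final write touches only sram_base + 4; if it corrupts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16863) * the word at sram_base, the target-mode bug is present.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16863) */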
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16864) writel(0xffffffff, sram_base + 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16865) if (readl(sram_base) != 0x00000000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16866) tg3_flag_set(tp, PCIX_TARGET_HWBUG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16867) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16868) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16869)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16870) udelay(50);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16871) tg3_nvram_init(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16873) /* If the device has an NVRAM, no need to load patch firmware */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16874) if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16875) !tg3_flag(tp, NO_NVRAM))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16876) tp->fw_needed = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16878) grc_misc_cfg = tr32(GRC_MISC_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16879) grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16881) if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16882) (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16883) grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16884) tg3_flag_set(tp, IS_5788);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16886) if (!tg3_flag(tp, IS_5788) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16887) tg3_asic_rev(tp) != ASIC_REV_5700)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16888) tg3_flag_set(tp, TAGGED_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16889) if (tg3_flag(tp, TAGGED_STATUS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16890) tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16891) HOSTCC_MODE_CLRTICK_TXBD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16893) tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16894) pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16895) tp->misc_host_ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16896) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16898) /* Preserve the APE MAC_MODE bits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16899) if (tg3_flag(tp, ENABLE_APE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16900) tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16901) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16902) tp->mac_mode = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16903)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16904) if (tg3_10_100_only_device(tp, ent))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16905) tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16907) err = tg3_phy_probe(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16908) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16909) dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16910) /* ... but do not return immediately ... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16911) tg3_mdio_fini(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16912) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16914) tg3_read_vpd(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16915) tg3_read_fw_ver(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16917) if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16918) tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16919) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16920) if (tg3_asic_rev(tp) == ASIC_REV_5700)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16921) tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16922) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16923) tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16924) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16926) /* 5700 {AX,BX} chips have a broken status block link
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16927) * change bit implementation, so we must use the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16928) * status register in those cases.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16929) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16930) if (tg3_asic_rev(tp) == ASIC_REV_5700)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16931) tg3_flag_set(tp, USE_LINKCHG_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16932) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16933) tg3_flag_clear(tp, USE_LINKCHG_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16935) /* The led_ctrl is set during tg3_phy_probe; here we might
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16936) * have to force the link status polling mechanism based
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16937) * upon subsystem IDs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16938) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16939) if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16940) tg3_asic_rev(tp) == ASIC_REV_5701 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16941) !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16942) tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16943) tg3_flag_set(tp, USE_LINKCHG_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16944) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16945)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16946) /* For all SERDES we poll the MAC status register. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16947) if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16948) tg3_flag_set(tp, POLL_SERDES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16949) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16950) tg3_flag_clear(tp, POLL_SERDES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16951)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16952) if (tg3_flag(tp, ENABLE_APE) && tg3_flag(tp, ENABLE_ASF))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16953) tg3_flag_set(tp, POLL_CPMU_LINK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16955) tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16956) tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16957) if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16958) tg3_flag(tp, PCIX_MODE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16959) tp->rx_offset = NET_SKB_PAD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16960) #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
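^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16960) /* With no efficient unaligned access, copy every rx packet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16960) * (maximum threshold), since buffers are not IP-aligned here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16960) */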
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16961) tp->rx_copy_thresh = ~(u16)0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16962) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16963) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16965) tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16966) tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16967) tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16969) tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16971) /* Increment the rx prod index on the rx std ring by at most
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16972) * 8 for these chips to work around hw errata.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16973) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16974) if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16975) tg3_asic_rev(tp) == ASIC_REV_5752 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16976) tg3_asic_rev(tp) == ASIC_REV_5755)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16977) tp->rx_std_max_post = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16978)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16979) if (tg3_flag(tp, ASPM_WORKAROUND))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16980) tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16981) PCIE_PWR_MGMT_L1_THRESH_MSK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16983) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16984) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16985)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16986) static int tg3_get_device_address(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16987) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16988) struct net_device *dev = tp->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16989) u32 hi, lo, mac_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16990) int addr_ok = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16991) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16993) if (!eth_platform_get_mac_address(&tp->pdev->dev, dev->dev_addr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16994) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16995)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16996) if (tg3_flag(tp, IS_SSB_CORE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16997) err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16998) if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16999) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17000) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17001)
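^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17001) /* 0x7c is the default NVRAM offset of the MAC address;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17001) * dual-MAC parts and extra PCI functions use the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17001) * alternate offsets selected below.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17001) */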
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17002) mac_offset = 0x7c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17003) if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17004) tg3_flag(tp, 5780_CLASS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17005) if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17006) mac_offset = 0xcc;
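^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17006) /* If the NVRAM arbitration lock cannot be taken, reset the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17006) * NVRAM state machine; otherwise release the lock again.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17006) */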
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17007) if (tg3_nvram_lock(tp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17008) tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17009) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17010) tg3_nvram_unlock(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17011) } else if (tg3_flag(tp, 5717_PLUS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17012) if (tp->pci_fn & 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17013) mac_offset = 0xcc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17014) if (tp->pci_fn > 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17015) mac_offset += 0x18c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17016) } else if (tg3_asic_rev(tp) == ASIC_REV_5906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17017) mac_offset = 0x10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17018)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17019) /* First try to get it from the MAC address mailbox. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17020) tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
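^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17020) /* 0x484b is "HK" in ASCII, the bootcode's valid-address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17020) * signature.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17020) */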
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17021) if ((hi >> 16) == 0x484b) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17022) dev->dev_addr[0] = (hi >> 8) & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17023) dev->dev_addr[1] = (hi >> 0) & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17025) tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17026) dev->dev_addr[2] = (lo >> 24) & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17027) dev->dev_addr[3] = (lo >> 16) & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17028) dev->dev_addr[4] = (lo >> 8) & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17029) dev->dev_addr[5] = (lo >> 0) & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17031) /* Some old bootcode may report a 0 MAC address in SRAM */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17032) addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17033) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17034) if (!addr_ok) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17035) /* Next, try NVRAM. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17036) if (!tg3_flag(tp, NO_NVRAM) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17037) !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17038) !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17039) memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17040) memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17041) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17042) /* Finally just fetch it out of the MAC control regs. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17043) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17044) hi = tr32(MAC_ADDR_0_HIGH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17045) lo = tr32(MAC_ADDR_0_LOW);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17047) dev->dev_addr[5] = lo & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17048) dev->dev_addr[4] = (lo >> 8) & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17049) dev->dev_addr[3] = (lo >> 16) & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17050) dev->dev_addr[2] = (lo >> 24) & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17051) dev->dev_addr[1] = hi & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17052) dev->dev_addr[0] = (hi >> 8) & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17053) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17054) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17056) if (!is_valid_ether_addr(&dev->dev_addr[0]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17057) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17058) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17059) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17061) #define BOUNDARY_SINGLE_CACHELINE 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17062) #define BOUNDARY_MULTI_CACHELINE 2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17063)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17064) static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17065) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17066) int cacheline_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17067) u8 byte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17068) int goal;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17069)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17070) pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17071) if (byte == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17072) cacheline_size = 1024;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17073) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17074) cacheline_size = (int) byte * 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17075)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17076) /* On 5703 and later chips, the boundary bits have no
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17077) * effect.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17078) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17079) if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17080) tg3_asic_rev(tp) != ASIC_REV_5701 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17081) !tg3_flag(tp, PCI_EXPRESS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17082) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17083)
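^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17083) /* Host-architecture preference for the DMA burst boundary;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17083) * a goal of 0 applies no boundary constraint.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17083) */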
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17084) #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17085) goal = BOUNDARY_MULTI_CACHELINE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17086) #elif defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17087) goal = BOUNDARY_SINGLE_CACHELINE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17088) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17089) goal = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17090) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17093)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17094) if (tg3_flag(tp, 57765_PLUS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17095) val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17096) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17097) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17098)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17099) if (!goal)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17100) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17102) /* PCI controllers on most RISC systems tend to disconnect
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17103) * when a device tries to burst across a cache-line boundary.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17104) * Therefore, letting tg3 do so just wastes PCI bandwidth.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17105) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17106) * Unfortunately, for PCI-E there are only limited
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17107) * write-side controls for this, and thus for reads
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17108) * we will still get the disconnects. We'll also waste
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17109) * these PCI cycles for both read and write for chips
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17110) * other than 5700 and 5701 which do not implement the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17111) * boundary bits.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17112) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17113) if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17114) switch (cacheline_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17115) case 16:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17116) case 32:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17117) case 64:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17118) case 128:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17119) if (goal == BOUNDARY_SINGLE_CACHELINE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17120) val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17121) DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17122) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17123) val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17124) DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17125) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17126) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17128) case 256:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17129) val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17130) DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17131) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17133) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17134) val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17135) DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17136) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17137) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17138) } else if (tg3_flag(tp, PCI_EXPRESS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17139) switch (cacheline_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17140) case 16:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17141) case 32:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17142) case 64:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17143) if (goal == BOUNDARY_SINGLE_CACHELINE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17144) val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17145) val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17146) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17147) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17148) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17149) case 128:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17150) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17151) val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17152) val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17153) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17154) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17155) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17156) switch (cacheline_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17157) case 16:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17158) if (goal == BOUNDARY_SINGLE_CACHELINE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17159) val |= (DMA_RWCTRL_READ_BNDRY_16 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17160) DMA_RWCTRL_WRITE_BNDRY_16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17161) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17162) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17163) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17164) case 32:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17165) if (goal == BOUNDARY_SINGLE_CACHELINE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17166) val |= (DMA_RWCTRL_READ_BNDRY_32 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17167) DMA_RWCTRL_WRITE_BNDRY_32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17168) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17169) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17170) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17171) case 64:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17172) if (goal == BOUNDARY_SINGLE_CACHELINE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17173) val |= (DMA_RWCTRL_READ_BNDRY_64 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17174) DMA_RWCTRL_WRITE_BNDRY_64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17175) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17176) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17177) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17178) case 128:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17179) if (goal == BOUNDARY_SINGLE_CACHELINE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17180) val |= (DMA_RWCTRL_READ_BNDRY_128 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17181) DMA_RWCTRL_WRITE_BNDRY_128);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17182) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17183) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17184) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17185) case 256:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17186) val |= (DMA_RWCTRL_READ_BNDRY_256 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17187) DMA_RWCTRL_WRITE_BNDRY_256);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17188) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17189) case 512:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17190) val |= (DMA_RWCTRL_READ_BNDRY_512 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17191) DMA_RWCTRL_WRITE_BNDRY_512);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17192) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17193) case 1024:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17194) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17195) val |= (DMA_RWCTRL_READ_BNDRY_1024 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17196) DMA_RWCTRL_WRITE_BNDRY_1024);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17197) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17198) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17199) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17201) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17202) return val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17203) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17204)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17205) static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17206) int size, bool to_device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17207) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17208) struct tg3_internal_buffer_desc test_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17209) u32 sram_dma_descs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17210) int i, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17212) sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17213)
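^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17213) /* Quiesce the DMA engines, buffer manager and FTQs before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17213) * running the test.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17213) */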
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17214) tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17215) tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17216) tw32(RDMAC_STATUS, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17217) tw32(WDMAC_STATUS, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17218)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17219) tw32(BUFMGR_MODE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17220) tw32(FTQ_RESET, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17222) test_desc.addr_hi = ((u64) buf_dma) >> 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17223) test_desc.addr_lo = buf_dma & 0xffffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17224) test_desc.nic_mbuf = 0x00002100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17225) test_desc.len = size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17226)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17227) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17228) * HP ZX1 was seeing test failures for 5701 cards running at 33 MHz
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17229) * the *second* time the tg3 driver was getting loaded after an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17230) * initial scan.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17231) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17232) * Broadcom tells me:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17233) * ...the DMA engine is connected to the GRC block and a DMA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17234) * reset may affect the GRC block in some unpredictable way...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17235) * The behavior of resets to individual blocks has not been tested.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17236) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17237) * Broadcom noted the GRC reset will also reset all sub-components.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17238) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17239) if (to_device) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17240) test_desc.cqid_sqid = (13 << 8) | 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17241)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17242) tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17243) udelay(40);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17244) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17245) test_desc.cqid_sqid = (16 << 8) | 7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17247) tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17248) udelay(40);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17249) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17250) test_desc.flags = 0x00000005;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17251)
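^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17251) /* Copy the descriptor into NIC SRAM one 32-bit word at a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17251) * time through the PCI memory window.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17251) */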
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17252) for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17253) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17255) val = *(((u32 *)&test_desc) + i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17256) pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17257) sram_dma_descs + (i * sizeof(u32)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17258) pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17259) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17260) pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17261)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17262) if (to_device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17263) tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17264) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17265) tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17266)
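^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17266) /* Poll the completion FIFO for the descriptor address:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17266) * up to 40 iterations of 100 usec, i.e. about 4 ms.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17266) */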
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17267) ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17268) for (i = 0; i < 40; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17269) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17270)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17271) if (to_device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17272) val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17273) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17274) val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17275) if ((val & 0xffff) == sram_dma_descs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17276) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17277) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17278) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17279)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17280) udelay(100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17281) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17282)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17283) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17284) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17285)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17286) #define TEST_BUFFER_SIZE 0x2000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17288) static const struct pci_device_id tg3_dma_wait_state_chipsets[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17289) { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17290) { },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17291) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17293) static int tg3_test_dma(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17294) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17295) dma_addr_t buf_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17296) u32 *buf, saved_dma_rwctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17297) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17299) buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17300) &buf_dma, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17301) if (!buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17302) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17303) goto out_nofree;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17304) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17305)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17306) tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17307) (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17309) tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17310)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17311) if (tg3_flag(tp, 57765_PLUS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17312) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17313)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17314) if (tg3_flag(tp, PCI_EXPRESS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17315) /* DMA read watermark not used on PCIE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17316) tp->dma_rwctrl |= 0x00180000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17317) } else if (!tg3_flag(tp, PCIX_MODE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17318) if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17319) tg3_asic_rev(tp) == ASIC_REV_5750)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17320) tp->dma_rwctrl |= 0x003f0000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17321) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17322) tp->dma_rwctrl |= 0x003f000f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17323) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17324) if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17325) tg3_asic_rev(tp) == ASIC_REV_5704) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17326) u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17327) u32 read_water = 0x7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17329) /* If the 5704 is behind the EPB bridge, we can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17330) * do the less restrictive ONE_DMA workaround for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17331) * better performance.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17332) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17333) if (tg3_flag(tp, 40BIT_DMA_BUG) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17334) tg3_asic_rev(tp) == ASIC_REV_5704)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17335) tp->dma_rwctrl |= 0x8000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17336) else if (ccval == 0x6 || ccval == 0x7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17337) tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17338)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17339) if (tg3_asic_rev(tp) == ASIC_REV_5703)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17340) read_water = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17341) /* Set bit 23 to enable PCIX hw bug fix */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17342) tp->dma_rwctrl |=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17343) (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17344) (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17345) (1 << 23);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17346) } else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17347) /* 5780 always in PCIX mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17348) tp->dma_rwctrl |= 0x00144000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17349) } else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17350) /* 5714 always in PCIX mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17351) tp->dma_rwctrl |= 0x00148000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17352) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17353) tp->dma_rwctrl |= 0x001b000f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17354) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17355) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17356) if (tg3_flag(tp, ONE_DMA_AT_ONCE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17357) tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17358)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17359) if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17360) tg3_asic_rev(tp) == ASIC_REV_5704)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17361) tp->dma_rwctrl &= 0xfffffff0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17362)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17363) if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17364) tg3_asic_rev(tp) == ASIC_REV_5701) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17365) /* Remove this if it causes problems for some boards. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17366) tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17367)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17368) /* On 5700/5701 chips, we need to set this bit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17369) * Otherwise the chip will issue cacheline transactions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17370) * to streamable DMA memory with not all the byte
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17371) * enables turned on. This is an error on several
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17372) * RISC PCI controllers, in particular sparc64.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17373) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17374) * On 5703/5704 chips, this bit has been reassigned
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17375) * a different meaning. In particular, it is used
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17376) * on those chips to enable a PCI-X workaround.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17377) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17378) tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17379) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17380)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17381) tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17382)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17383)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17384) if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17385) tg3_asic_rev(tp) != ASIC_REV_5701)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17386) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17387)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17388) /* It is best to perform the DMA test with the maximum write burst size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17389) * to expose the 5700/5701 write DMA bug.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17390) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17391) saved_dma_rwctrl = tp->dma_rwctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17392) tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17393) tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17394)
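^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17394) /* Pattern-test loop: on read-back corruption, drop the write
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17394) * boundary to 16 bytes and retry; corruption with the 16-byte
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17394) * boundary already in place is fatal.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17394) */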
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17395) while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17396) u32 *p = buf, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17397)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17398) for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17399) p[i] = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17401) /* Send the buffer to the chip. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17402) ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17403) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17404) dev_err(&tp->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17405) "%s: Buffer write failed. err = %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17406) __func__, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17407) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17408) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17409)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17410) /* Now read it back. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17411) ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17412) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17413) dev_err(&tp->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17414) "%s: Buffer read failed. err = %d\n", __func__, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17415) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17416) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17418) /* Verify it. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17419) for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17420) if (p[i] == i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17421) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17423) if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17424) DMA_RWCTRL_WRITE_BNDRY_16) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17425) tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17426) tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17427) tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17428) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17429) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17430) dev_err(&tp->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17431) "%s: Buffer corrupted on read back! (%d != %d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17432) __func__, p[i], i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17433) ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17434) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17435) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17436) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17437)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17438) if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17439) /* Success. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17440) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17441) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17442) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17443) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17444) if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17445) DMA_RWCTRL_WRITE_BNDRY_16) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17446) /* The DMA test passed without adjusting the DMA boundary;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17447) * now look for chipsets that are known to expose the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17448) * DMA bug without failing the test.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17449) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17450) if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17451) tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17452) tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17453) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17454) /* Safe to use the calculated DMA boundary. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17455) tp->dma_rwctrl = saved_dma_rwctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17456) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17457)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17458) tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17459) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17460)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17461) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17462) dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17463) out_nofree:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17464) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17465) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17466)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17467) static void tg3_init_bufmgr_config(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17468) {
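^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17468) /* Buffer manager watermark defaults are grouped by chip
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17468) * generation: 57765+, 5705+ (with 5906 overrides), then the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17468) * original 5700-class parts.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17468) */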
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17469) if (tg3_flag(tp, 57765_PLUS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17470) tp->bufmgr_config.mbuf_read_dma_low_water =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17471) DEFAULT_MB_RDMA_LOW_WATER_5705;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17472) tp->bufmgr_config.mbuf_mac_rx_low_water =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17473) DEFAULT_MB_MACRX_LOW_WATER_57765;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17474) tp->bufmgr_config.mbuf_high_water =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17475) DEFAULT_MB_HIGH_WATER_57765;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17476)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17477) tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17478) DEFAULT_MB_RDMA_LOW_WATER_5705;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17479) tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17480) DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17481) tp->bufmgr_config.mbuf_high_water_jumbo =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17482) DEFAULT_MB_HIGH_WATER_JUMBO_57765;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17483) } else if (tg3_flag(tp, 5705_PLUS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17484) tp->bufmgr_config.mbuf_read_dma_low_water =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17485) DEFAULT_MB_RDMA_LOW_WATER_5705;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17486) tp->bufmgr_config.mbuf_mac_rx_low_water =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17487) DEFAULT_MB_MACRX_LOW_WATER_5705;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17488) tp->bufmgr_config.mbuf_high_water =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17489) DEFAULT_MB_HIGH_WATER_5705;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17490) if (tg3_asic_rev(tp) == ASIC_REV_5906) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17491) tp->bufmgr_config.mbuf_mac_rx_low_water =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17492) DEFAULT_MB_MACRX_LOW_WATER_5906;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17493) tp->bufmgr_config.mbuf_high_water =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17494) DEFAULT_MB_HIGH_WATER_5906;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17495) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17496)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17497) tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17498) DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17499) tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17500) DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17501) tp->bufmgr_config.mbuf_high_water_jumbo =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17502) DEFAULT_MB_HIGH_WATER_JUMBO_5780;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17503) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17504) tp->bufmgr_config.mbuf_read_dma_low_water =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17505) DEFAULT_MB_RDMA_LOW_WATER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17506) tp->bufmgr_config.mbuf_mac_rx_low_water =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17507) DEFAULT_MB_MACRX_LOW_WATER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17508) tp->bufmgr_config.mbuf_high_water =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17509) DEFAULT_MB_HIGH_WATER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17510)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17511) tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17512) DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17513) tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17514) DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17515) tp->bufmgr_config.mbuf_high_water_jumbo =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17516) DEFAULT_MB_HIGH_WATER_JUMBO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17517) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17518)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17519) tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17520) tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17521) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17522)
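/* Map the masked PHY ID to the device name printed in the probe log. */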
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17523) static char *tg3_phy_string(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17524) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17525) switch (tp->phy_id & TG3_PHY_ID_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17526) case TG3_PHY_ID_BCM5400: return "5400";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17527) case TG3_PHY_ID_BCM5401: return "5401";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17528) case TG3_PHY_ID_BCM5411: return "5411";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17529) case TG3_PHY_ID_BCM5701: return "5701";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17530) case TG3_PHY_ID_BCM5703: return "5703";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17531) case TG3_PHY_ID_BCM5704: return "5704";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17532) case TG3_PHY_ID_BCM5705: return "5705";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17533) case TG3_PHY_ID_BCM5750: return "5750";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17534) case TG3_PHY_ID_BCM5752: return "5752";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17535) case TG3_PHY_ID_BCM5714: return "5714";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17536) case TG3_PHY_ID_BCM5780: return "5780";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17537) case TG3_PHY_ID_BCM5755: return "5755";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17538) case TG3_PHY_ID_BCM5787: return "5787";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17539) case TG3_PHY_ID_BCM5784: return "5784";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17540) case TG3_PHY_ID_BCM5756: return "5722/5756";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17541) case TG3_PHY_ID_BCM5906: return "5906";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17542) case TG3_PHY_ID_BCM5761: return "5761";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17543) case TG3_PHY_ID_BCM5718C: return "5718C";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17544) case TG3_PHY_ID_BCM5718S: return "5718S";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17545) case TG3_PHY_ID_BCM57765: return "57765";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17546) case TG3_PHY_ID_BCM5719C: return "5719C";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17547) case TG3_PHY_ID_BCM5720C: return "5720C";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17548) case TG3_PHY_ID_BCM5762: return "5762C";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17549) case TG3_PHY_ID_BCM8002: return "8002/serdes";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17550) case 0: return "serdes";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17551) default: return "unknown";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17552) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17553) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17554)
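/* Format a human-readable bus description (type, clock, width) into
 * @str for the probe banner. On PCI-X parts the low bits of
 * TG3PCI_CLOCK_CTRL encode the detected bus speed: 0 is 33MHz, 2 is
 * 50MHz, 4 is 66MHz, 6 is 100MHz, and 7 (or a 5704CIOBE board ID)
 * means 133MHz.
 */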
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17555) static char *tg3_bus_string(struct tg3 *tp, char *str)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17556) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17557) if (tg3_flag(tp, PCI_EXPRESS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17558) strcpy(str, "PCI Express");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17559) return str;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17560) } else if (tg3_flag(tp, PCIX_MODE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17561) u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17562)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17563) strcpy(str, "PCIX:");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17564)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17565) if ((clock_ctrl == 7) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17566) ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17567) GRC_MISC_CFG_BOARD_ID_5704CIOBE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17568) strcat(str, "133MHz");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17569) else if (clock_ctrl == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17570) strcat(str, "33MHz");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17571) else if (clock_ctrl == 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17572) strcat(str, "50MHz");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17573) else if (clock_ctrl == 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17574) strcat(str, "66MHz");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17575) else if (clock_ctrl == 6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17576) strcat(str, "100MHz");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17577) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17578) strcpy(str, "PCI:");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17579) if (tg3_flag(tp, PCI_HIGH_SPEED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17580) strcat(str, "66MHz");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17581) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17582) strcat(str, "33MHz");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17583) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17584) if (tg3_flag(tp, PCI_32BIT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17585) strcat(str, ":32-bit");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17586) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17587) strcat(str, ":64-bit");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17588) return str;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17589) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17590)
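/* Seed the default ethtool interrupt coalescing parameters. The
 * *_CLRTCKS values are used when the host coalescing engine runs in the
 * CLRTICK modes, i.e. its tick counters are cleared whenever new buffer
 * descriptors arrive. 5705 and later parts have the per-interrupt and
 * statistics tick values zeroed.
 */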
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17591) static void tg3_init_coal(struct tg3 *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17592) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17593) struct ethtool_coalesce *ec = &tp->coal;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17594)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17595) memset(ec, 0, sizeof(*ec));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17596) ec->cmd = ETHTOOL_GCOALESCE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17597) ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17598) ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17599) ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17600) ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17601) ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17602) ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17603) ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17604) ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17605) ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17606)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17607) if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17608) HOSTCC_MODE_CLRTICK_TXBD)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17609) ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17610) ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17611) ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17612) ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17613) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17614)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17615) if (tg3_flag(tp, 5705_PLUS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17616) ec->rx_coalesce_usecs_irq = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17617) ec->tx_coalesce_usecs_irq = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17618) ec->stats_block_coalesce_usecs = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17619) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17620) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17621)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17622) static int tg3_init_one(struct pci_dev *pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17623) const struct pci_device_id *ent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17624) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17625) struct net_device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17626) struct tg3 *tp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17627) int i, err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17628) u32 sndmbx, rcvmbx, intmbx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17629) char str[40];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17630) u64 dma_mask, persist_dma_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17631) netdev_features_t features = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17632)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17633) err = pci_enable_device(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17634) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17635) dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17636) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17637) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17638)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17639) err = pci_request_regions(pdev, DRV_MODULE_NAME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17640) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17641) dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17642) goto err_out_disable_pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17643) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17644)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17645) pci_set_master(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17646)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17647) dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17648) if (!dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17649) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17650) goto err_out_free_res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17651) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17652)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17653) SET_NETDEV_DEV(dev, &pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17654)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17655) tp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17656) tp->pdev = pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17657) tp->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17658) tp->rx_mode = TG3_DEF_RX_MODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17659) tp->tx_mode = TG3_DEF_TX_MODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17660) tp->irq_sync = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17661) tp->pcierr_recovery = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17662)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17663) if (tg3_debug > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17664) tp->msg_enable = tg3_debug;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17665) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17666) tp->msg_enable = TG3_DEF_MSG_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17667)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17668) if (pdev_is_ssb_gige_core(pdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17669) tg3_flag_set(tp, IS_SSB_CORE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17670) if (ssb_gige_must_flush_posted_writes(pdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17671) tg3_flag_set(tp, FLUSH_POSTED_WRITES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17672) if (ssb_gige_one_dma_at_once(pdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17673) tg3_flag_set(tp, ONE_DMA_AT_ONCE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17674) if (ssb_gige_have_roboswitch(pdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17675) tg3_flag_set(tp, USE_PHYLIB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17676) tg3_flag_set(tp, ROBOSWITCH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17677) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17678) if (ssb_gige_is_rgmii(pdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17679) tg3_flag_set(tp, RGMII_MODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17680) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17681)
	/* The word/byte swap controls here govern register access byte
	 * swapping. DMA data byte swapping is controlled in the GRC_MODE
	 * setting below.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17686) tp->misc_host_ctrl =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17687) MISC_HOST_CTRL_MASK_PCI_INT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17688) MISC_HOST_CTRL_WORD_SWAP |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17689) MISC_HOST_CTRL_INDIR_ACCESS |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17690) MISC_HOST_CTRL_PCISTATE_RW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17691)
	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries, i.e. anything which isn't packet data.
	 *
	 * The StrongARM chips on the board (one for tx, one for rx)
	 * run in big-endian mode.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17698) tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17699) GRC_MODE_WSWAP_NONFRM_DATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17700) #ifdef __BIG_ENDIAN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17701) tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17702) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17703) spin_lock_init(&tp->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17704) spin_lock_init(&tp->indirect_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17705) INIT_WORK(&tp->reset_task, tg3_reset_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17706)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17707) tp->regs = pci_ioremap_bar(pdev, BAR_0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17708) if (!tp->regs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17709) dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17710) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17711) goto err_out_free_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17712) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17713)
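	/* These devices contain an APE, a separate management processor
	 * whose shared registers live behind BAR 2. Map them here so the
	 * ENABLE_APE paths (firmware handshake, driver state changes) can
	 * reach it.
	 */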
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17714) if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17715) tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17716) tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17717) tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17718) tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17719) tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17720) tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17721) tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17722) tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17723) tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17724) tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17725) tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17726) tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17727) tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17728) tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17729) tg3_flag_set(tp, ENABLE_APE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17730) tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17731) if (!tp->aperegs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17732) dev_err(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17733) "Cannot map APE registers, aborting\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17734) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17735) goto err_out_iounmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17736) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17737) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17738)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17739) tp->rx_pending = TG3_DEF_RX_RING_PENDING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17740) tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17741)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17742) dev->ethtool_ops = &tg3_ethtool_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17743) dev->watchdog_timeo = TG3_TX_TIMEOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17744) dev->netdev_ops = &tg3_netdev_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17745) dev->irq = pdev->irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17746)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17747) err = tg3_get_invariants(tp, ent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17748) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17749) dev_err(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17750) "Problem fetching invariants of chip, aborting\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17751) goto err_out_apeunmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17752) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17753)
	/* The EPB bridge inside the 5714, 5715, and 5780, and any
	 * device behind the EPB, cannot support DMA addresses wider
	 * than 40 bits.
	 * On 64-bit systems with an IOMMU, use a 40-bit dma_mask.
	 * On 64-bit systems without an IOMMU, use a 64-bit dma_mask
	 * and do the DMA address check in tg3_start_xmit().
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17760) if (tg3_flag(tp, IS_5788))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17761) persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17762) else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17763) persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17764) #ifdef CONFIG_HIGHMEM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17765) dma_mask = DMA_BIT_MASK(64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17766) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17767) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17768) persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
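	/* In the 40BIT_DMA_BUG case with CONFIG_HIGHMEM, the streaming
	 * mask is widened to 64 bits so highmem pages can still be
	 * mapped; the 40-bit limit is then enforced per packet in
	 * tg3_start_xmit(), as the comment above describes.
	 */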
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17769)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17770) /* Configure DMA attributes. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17771) if (dma_mask > DMA_BIT_MASK(32)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17772) err = pci_set_dma_mask(pdev, dma_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17773) if (!err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17774) features |= NETIF_F_HIGHDMA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17775) err = pci_set_consistent_dma_mask(pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17776) persist_dma_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17777) if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17778) dev_err(&pdev->dev, "Unable to obtain 64 bit "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17779) "DMA for consistent allocations\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17780) goto err_out_apeunmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17781) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17782) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17783) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17784) if (err || dma_mask == DMA_BIT_MASK(32)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17785) err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17786) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17787) dev_err(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17788) "No usable DMA configuration, aborting\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17789) goto err_out_apeunmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17790) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17791) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17792)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17793) tg3_init_bufmgr_config(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17794)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17795) /* 5700 B0 chips do not support checksumming correctly due
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17796) * to hardware bugs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17797) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17798) if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17799) features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17800)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17801) if (tg3_flag(tp, 5755_PLUS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17802) features |= NETIF_F_IPV6_CSUM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17803) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17804)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17805) /* TSO is on by default on chips that support hardware TSO.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17806) * Firmware TSO on older chips gives lower performance, so it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17807) * is off by default, but can be enabled using ethtool.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17808) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17809) if ((tg3_flag(tp, HW_TSO_1) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17810) tg3_flag(tp, HW_TSO_2) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17811) tg3_flag(tp, HW_TSO_3)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17812) (features & NETIF_F_IP_CSUM))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17813) features |= NETIF_F_TSO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17814) if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17815) if (features & NETIF_F_IPV6_CSUM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17816) features |= NETIF_F_TSO6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17817) if (tg3_flag(tp, HW_TSO_3) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17818) tg3_asic_rev(tp) == ASIC_REV_5761 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17819) (tg3_asic_rev(tp) == ASIC_REV_5784 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17820) tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17821) tg3_asic_rev(tp) == ASIC_REV_5785 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17822) tg3_asic_rev(tp) == ASIC_REV_57780)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17823) features |= NETIF_F_TSO_ECN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17824) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17825)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17826) dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17827) NETIF_F_HW_VLAN_CTAG_RX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17828) dev->vlan_features |= features;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17829)
	/*
	 * Add loopback capability only for a subset of devices that support
	 * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
	 * loopback for the remaining devices.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17835) if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17836) !tg3_flag(tp, CPMU_PRESENT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17837) /* Add the loopback capability */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17838) features |= NETIF_F_LOOPBACK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17840) dev->hw_features |= features;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17841) dev->priv_flags |= IFF_UNICAST_FLT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17842)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17843) /* MTU range: 60 - 9000 or 1500, depending on hardware */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17844) dev->min_mtu = TG3_MIN_MTU;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17845) dev->max_mtu = TG3_MAX_MTU(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17847) if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17848) !tg3_flag(tp, TSO_CAPABLE) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17849) !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17850) tg3_flag_set(tp, MAX_RXPEND_64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17851) tp->rx_pending = 63;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17852) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17854) err = tg3_get_device_address(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17855) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17856) dev_err(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17857) "Could not obtain valid ethernet address, aborting\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17858) goto err_out_apeunmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17859) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17860)
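	/* Set up per-vector NAPI state plus the interrupt, consumer, and
	 * producer mailbox offsets each vector will use. The mailboxes are
	 * not laid out linearly, hence the irregular strides below.
	 */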
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17861) intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17862) rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17863) sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17864) for (i = 0; i < tp->irq_max; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17865) struct tg3_napi *tnapi = &tp->napi[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17867) tnapi->tp = tp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17868) tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17869)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17870) tnapi->int_mbox = intmbx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17871) if (i <= 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17872) intmbx += 0x8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17873) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17874) intmbx += 0x4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17876) tnapi->consmbox = rcvmbx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17877) tnapi->prodmbox = sndmbx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17878)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17879) if (i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17880) tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17881) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17882) tnapi->coal_now = HOSTCC_MODE_NOW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17884) if (!tg3_flag(tp, SUPPORT_MSIX))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17885) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17886)
		/*
		 * If we support MSIX, we'll be using RSS. If we're using
		 * RSS, the first vector only handles link interrupts and the
		 * remaining vectors handle rx and tx interrupts. Reuse the
		 * mailbox values for the next iteration. The values we set up
		 * above are still useful for the single-vector mode.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17894) if (!i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17895) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17897) rcvmbx += 0x8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17899) if (sndmbx & 0x4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17900) sndmbx -= 0x4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17901) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17902) sndmbx += 0xc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17903) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17904)
	/*
	 * Reset the chip in case the UNDI or EFI driver did not shut it
	 * down. Otherwise the DMA self test will enable the WDMAC and
	 * we'll see (spurious) pending DMA on the PCI bus at that point.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17910) if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17911) (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17912) tg3_full_lock(tp, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17913) tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17914) tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17915) tg3_full_unlock(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17916) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17918) err = tg3_test_dma(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17919) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17920) dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17921) goto err_out_apeunmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17922) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17923)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17924) tg3_init_coal(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17926) pci_set_drvdata(pdev, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17928) if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17929) tg3_asic_rev(tp) == ASIC_REV_5720 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17930) tg3_asic_rev(tp) == ASIC_REV_5762)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17931) tg3_flag_set(tp, PTP_CAPABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17933) tg3_timer_init(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17935) tg3_carrier_off(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17936)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17937) err = register_netdev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17938) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17939) dev_err(&pdev->dev, "Cannot register net device, aborting\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17940) goto err_out_apeunmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17941) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17942)
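	/* Register the PTP hardware clock only after the netdev exists.
	 * A failed clock registration is not fatal; the device simply
	 * comes up without PTP support.
	 */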
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17943) if (tg3_flag(tp, PTP_CAPABLE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17944) tg3_ptp_init(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17945) tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17946) &tp->pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17947) if (IS_ERR(tp->ptp_clock))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17948) tp->ptp_clock = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17949) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17951) netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17952) tp->board_part_number,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17953) tg3_chip_rev_id(tp),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17954) tg3_bus_string(tp, str),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17955) dev->dev_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17956)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17957) if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17958) char *ethtype;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17959)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17960) if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17961) ethtype = "10/100Base-TX";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17962) else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17963) ethtype = "1000Base-SX";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17964) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17965) ethtype = "10/100/1000Base-T";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17967) netdev_info(dev, "attached PHY is %s (%s Ethernet) "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17968) "(WireSpeed[%d], EEE[%d])\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17969) tg3_phy_string(tp), ethtype,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17970) (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17971) (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17972) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17974) netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17975) (dev->features & NETIF_F_RXCSUM) != 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17976) tg3_flag(tp, USE_LINKCHG_REG) != 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17977) (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17978) tg3_flag(tp, ENABLE_ASF) != 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17979) tg3_flag(tp, TSO_CAPABLE) != 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17980) netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17981) tp->dma_rwctrl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17982) pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17983) ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17985) pci_save_state(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17987) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17988)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17989) err_out_apeunmap:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17990) if (tp->aperegs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17991) iounmap(tp->aperegs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17992) tp->aperegs = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17993) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17994)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17995) err_out_iounmap:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17996) if (tp->regs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17997) iounmap(tp->regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17998) tp->regs = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17999) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18001) err_out_free_dev:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18002) free_netdev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18004) err_out_free_res:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18005) pci_release_regions(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18006)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18007) err_out_disable_pdev:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18008) if (pci_is_enabled(pdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18009) pci_disable_device(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18010) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18011) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18012)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18013) static void tg3_remove_one(struct pci_dev *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18014) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18015) struct net_device *dev = pci_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18017) if (dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18018) struct tg3 *tp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18019)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18020) tg3_ptp_fini(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18022) release_firmware(tp->fw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18023)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18024) tg3_reset_task_cancel(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18025)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18026) if (tg3_flag(tp, USE_PHYLIB)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18027) tg3_phy_fini(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18028) tg3_mdio_fini(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18029) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18031) unregister_netdev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18032) if (tp->aperegs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18033) iounmap(tp->aperegs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18034) tp->aperegs = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18035) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18036) if (tp->regs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18037) iounmap(tp->regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18038) tp->regs = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18039) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18040) free_netdev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18041) pci_release_regions(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18042) pci_disable_device(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18043) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18044) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18045)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18046) #ifdef CONFIG_PM_SLEEP
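/* Quiesce the device for system sleep: cancel the reset task, stop the
 * PHY, NAPI and the timer, halt the hardware, and let
 * tg3_power_down_prepare() pick the low-power state. If that fails, the
 * hardware is restarted so the system still has a working interface.
 */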
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18047) static int tg3_suspend(struct device *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18048) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18049) struct net_device *dev = dev_get_drvdata(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18050) struct tg3 *tp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18051) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18052)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18053) rtnl_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18054)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18055) if (!netif_running(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18056) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18057)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18058) tg3_reset_task_cancel(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18059) tg3_phy_stop(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18060) tg3_netif_stop(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18061)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18062) tg3_timer_stop(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18063)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18064) tg3_full_lock(tp, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18065) tg3_disable_ints(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18066) tg3_full_unlock(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18067)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18068) netif_device_detach(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18069)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18070) tg3_full_lock(tp, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18071) tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18072) tg3_flag_clear(tp, INIT_COMPLETE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18073) tg3_full_unlock(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18074)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18075) err = tg3_power_down_prepare(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18076) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18077) int err2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18079) tg3_full_lock(tp, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18081) tg3_flag_set(tp, INIT_COMPLETE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18082) err2 = tg3_restart_hw(tp, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18083) if (err2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18084) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18086) tg3_timer_start(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18088) netif_device_attach(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18089) tg3_netif_start(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18090)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18091) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18092) tg3_full_unlock(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18093)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18094) if (!err2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18095) tg3_phy_start(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18096) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18097)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18098) unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18099) rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18100) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18101) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18103) static int tg3_resume(struct device *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18104) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18105) struct net_device *dev = dev_get_drvdata(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18106) struct tg3 *tp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18107) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18109) rtnl_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18111) if (!netif_running(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18112) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18114) netif_device_attach(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18116) tg3_full_lock(tp, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18117)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18118) tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18120) tg3_flag_set(tp, INIT_COMPLETE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18121) err = tg3_restart_hw(tp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18122) !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18123) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18124) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18126) tg3_timer_start(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18128) tg3_netif_start(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18129)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18130) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18131) tg3_full_unlock(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18133) if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18134) tg3_phy_start(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18136) unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18137) rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18138) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18139) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18140) #endif /* CONFIG_PM_SLEEP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18142) static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18143)
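/* On shutdown, detach and close the interface; the chip is powered all
 * the way down only when the system itself is powering off.
 */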
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18144) static void tg3_shutdown(struct pci_dev *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18145) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18146) struct net_device *dev = pci_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18147) struct tg3 *tp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18149) rtnl_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18150) netif_device_detach(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18152) if (netif_running(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18153) dev_close(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18154)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18155) if (system_state == SYSTEM_POWER_OFF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18156) tg3_power_down(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18158) rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18159) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18160)
/**
 * tg3_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current PCI connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18169) static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18170) pci_channel_state_t state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18171) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18172) struct net_device *netdev = pci_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18173) struct tg3 *tp = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18174) pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18175)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18176) netdev_info(netdev, "PCI I/O error detected\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18178) rtnl_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18179)
	/* This could be a second call, or the netdev may not exist yet */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18181) if (!netdev || tp->pcierr_recovery || !netif_running(netdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18182) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18183)
	/* A permanent error needs no recovery, so only flag recovery in
	 * progress for a frozen channel.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18185) if (state == pci_channel_io_frozen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18186) tp->pcierr_recovery = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18188) tg3_phy_stop(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18190) tg3_netif_stop(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18192) tg3_timer_stop(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18193)
	/* Make sure that the reset task cannot run concurrently */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18195) tg3_reset_task_cancel(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18197) netif_device_detach(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18198)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18199) /* Clean up software state, even if MMIO is blocked */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18200) tg3_full_lock(tp, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18201) tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18202) tg3_full_unlock(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18204) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18205) if (state == pci_channel_io_perm_failure) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18206) if (netdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18207) tg3_napi_enable(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18208) dev_close(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18209) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18210) err = PCI_ERS_RESULT_DISCONNECT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18211) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18212) pci_disable_device(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18213) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18214)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18215) rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18217) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18218) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18219)
/**
 * tg3_io_slot_reset - called after the PCI bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by the BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18229) static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18230) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18231) struct net_device *netdev = pci_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18232) struct tg3 *tp = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18233) pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18234) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18235)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18236) rtnl_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18237)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18238) if (pci_enable_device(pdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18239) dev_err(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18240) "Cannot re-enable PCI device after reset.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18241) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18242) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18243)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18244) pci_set_master(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18245) pci_restore_state(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18246) pci_save_state(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18247)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18248) if (!netdev || !netif_running(netdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18249) rc = PCI_ERS_RESULT_RECOVERED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18250) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18251) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18252)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18253) err = tg3_power_up(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18254) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18255) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18257) rc = PCI_ERS_RESULT_RECOVERED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18259) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18260) if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18261) tg3_napi_enable(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18262) dev_close(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18263) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18264) rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18265)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18266) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18267) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18268)
/**
 * tg3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18276) static void tg3_io_resume(struct pci_dev *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18277) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18278) struct net_device *netdev = pci_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18279) struct tg3 *tp = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18280) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18282) rtnl_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18283)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18284) if (!netdev || !netif_running(netdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18285) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18287) tg3_full_lock(tp, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18288) tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18289) tg3_flag_set(tp, INIT_COMPLETE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18290) err = tg3_restart_hw(tp, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18291) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18292) tg3_full_unlock(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18293) netdev_err(netdev, "Cannot restart hardware after reset.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18294) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18295) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18296)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18297) netif_device_attach(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18299) tg3_timer_start(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18301) tg3_netif_start(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18302)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18303) tg3_full_unlock(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18305) tg3_phy_start(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18307) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18308) tp->pcierr_recovery = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18309) rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18310) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18312) static const struct pci_error_handlers tg3_err_handler = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18313) .error_detected = tg3_io_error_detected,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18314) .slot_reset = tg3_io_slot_reset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18315) .resume = tg3_io_resume
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18316) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18317)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18318) static struct pci_driver tg3_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18319) .name = DRV_MODULE_NAME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18320) .id_table = tg3_pci_tbl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18321) .probe = tg3_init_one,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18322) .remove = tg3_remove_one,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18323) .err_handler = &tg3_err_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18324) .driver.pm = &tg3_pm_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18325) .shutdown = tg3_shutdown,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18326) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18327)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18328) module_pci_driver(tg3_driver);