^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) /* bnx2.c: QLogic bnx2 network driver.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * Copyright (c) 2004-2014 Broadcom Corporation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) * Copyright (c) 2014-2015 QLogic Corporation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) * This program is free software; you can redistribute it and/or modify
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) * it under the terms of the GNU General Public License as published by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) * the Free Software Foundation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) * Written by: Michael Chan (mchan@broadcom.com)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include <linux/moduleparam.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #include <linux/stringify.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) #include <linux/timer.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) #include <linux/errno.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #include <linux/ioport.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) #include <linux/vmalloc.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) #include <linux/interrupt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) #include <linux/pci.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) #include <linux/netdevice.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) #include <linux/etherdevice.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) #include <linux/skbuff.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) #include <linux/dma-mapping.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) #include <linux/bitops.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) #include <asm/io.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) #include <asm/irq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) #include <linux/delay.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) #include <asm/byteorder.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) #include <asm/page.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) #include <linux/time.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) #include <linux/ethtool.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) #include <linux/mii.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) #include <linux/if.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) #include <linux/if_vlan.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) #include <net/ip.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) #include <net/tcp.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) #include <net/checksum.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) #include <linux/workqueue.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) #include <linux/crc32.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) #include <linux/prefetch.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) #include <linux/cache.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) #include <linux/firmware.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) #include <linux/log2.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) #include <linux/aer.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) #include <linux/crash_dump.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) #if IS_ENABLED(CONFIG_CNIC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) #define BCM_CNIC 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) #include "cnic_if.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) #include "bnx2.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) #include "bnx2_fw.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) #define DRV_MODULE_NAME "bnx2"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) #define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-6.2.3.fw"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) #define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-6.0.15.fw"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) #define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-6.2.1b.fw"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) #define FW_RV2P_FILE_09_Ax "bnx2/bnx2-rv2p-09ax-6.0.17.fw"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) #define FW_RV2P_FILE_09 "bnx2/bnx2-rv2p-09-6.0.17.fw"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) #define RUN_AT(x) (jiffies + (x))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) /* Time in jiffies before concluding the transmitter is hung. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) #define TX_TIMEOUT (5*HZ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) MODULE_DESCRIPTION("QLogic BCM5706/5708/5709/5716 Driver");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) MODULE_LICENSE("GPL");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) MODULE_FIRMWARE(FW_MIPS_FILE_06);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) MODULE_FIRMWARE(FW_RV2P_FILE_06);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) MODULE_FIRMWARE(FW_MIPS_FILE_09);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) MODULE_FIRMWARE(FW_RV2P_FILE_09);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) MODULE_FIRMWARE(FW_RV2P_FILE_09_Ax);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) static int disable_msi = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) module_param(disable_msi, int, 0444);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) typedef enum {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) BCM5706 = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) NC370T,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) NC370I,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) BCM5706S,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) NC370F,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) BCM5708,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) BCM5708S,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) BCM5709,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) BCM5709S,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) BCM5716,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) BCM5716S,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) } board_t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) /* indexed by board_t, above */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) static struct {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) char *name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) } board_info[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) { "Broadcom NetXtreme II BCM5706 1000Base-T" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) { "HP NC370T Multifunction Gigabit Server Adapter" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) { "HP NC370i Multifunction Gigabit Server Adapter" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) { "HP NC370F Multifunction Gigabit Server Adapter" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) { "Broadcom NetXtreme II BCM5708 1000Base-T" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) { "Broadcom NetXtreme II BCM5709 1000Base-T" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) { "Broadcom NetXtreme II BCM5709 1000Base-SX" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) { "Broadcom NetXtreme II BCM5716 1000Base-T" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) { "Broadcom NetXtreme II BCM5716 1000Base-SX" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) static const struct pci_device_id bnx2_pci_tbl[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) { PCI_VENDOR_ID_BROADCOM, 0x163b,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) { PCI_VENDOR_ID_BROADCOM, 0x163c,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) { 0, }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) static const struct flash_spec flash_table[] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) #define BUFFERED_FLAGS (BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) #define NONBUFFERED_FLAGS (BNX2_NV_WREN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) /* Slow EEPROM */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) "EEPROM - slow"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) /* Expansion entry 0001 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) "Entry 0001"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) /* Saifun SA25F010 (non-buffered flash) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) /* strap, cfg1, & write1 need updates */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) "Non-buffered flash (128kB)"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) /* Saifun SA25F020 (non-buffered flash) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) /* strap, cfg1, & write1 need updates */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) "Non-buffered flash (256kB)"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) /* Expansion entry 0100 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) "Entry 0100"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) /* Entry 0110: ST M45PE20 (non-buffered flash)*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) /* Saifun SA25F005 (non-buffered flash) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) /* strap, cfg1, & write1 need updates */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) "Non-buffered flash (64kB)"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) /* Fast EEPROM */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) "EEPROM - fast"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) /* Expansion entry 1001 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) "Entry 1001"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) /* Expansion entry 1010 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) "Entry 1010"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) /* ATMEL AT45DB011B (buffered flash) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) "Buffered flash (128kB)"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) /* Expansion entry 1100 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) "Entry 1100"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) /* Expansion entry 1101 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) "Entry 1101"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) /* Ateml Expansion entry 1110 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) "Entry 1110 (Atmel)"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) /* ATMEL AT45DB021B (buffered flash) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) "Buffered flash (256kB)"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) static const struct flash_spec flash_5709 = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) .flags = BNX2_NV_BUFFERED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) .page_bits = BCM5709_FLASH_PAGE_BITS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) .page_size = BCM5709_FLASH_PAGE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) .addr_mask = BCM5709_FLASH_BYTE_ADDR_MASK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) .total_size = BUFFERED_FLASH_TOTAL_SIZE*2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) .name = "5709 Buffered flash (256kB)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) static void bnx2_init_napi(struct bnx2 *bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) static void bnx2_del_napi(struct bnx2 *bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) u32 diff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) /* The ring uses 256 indices for 255 entries, one of them
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) * needs to be skipped.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) diff = READ_ONCE(txr->tx_prod) - READ_ONCE(txr->tx_cons);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) if (unlikely(diff >= BNX2_TX_DESC_CNT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) diff &= 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) if (diff == BNX2_TX_DESC_CNT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) diff = BNX2_MAX_TX_DESC_CNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) return bp->tx_ring_size - diff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) static u32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) spin_lock_irqsave(&bp->indirect_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) val = BNX2_RD(bp, BNX2_PCICFG_REG_WINDOW);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) spin_unlock_irqrestore(&bp->indirect_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) return val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) spin_lock_irqsave(&bp->indirect_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) spin_unlock_irqrestore(&bp->indirect_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) static u32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) return bnx2_reg_rd_ind(bp, bp->shmem_base + offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) offset += cid_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) spin_lock_irqsave(&bp->indirect_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) BNX2_WR(bp, BNX2_CTX_CTX_DATA, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) BNX2_WR(bp, BNX2_CTX_CTX_CTRL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) for (i = 0; i < 5; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) val = BNX2_RD(bp, BNX2_CTX_CTX_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) udelay(5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) BNX2_WR(bp, BNX2_CTX_DATA_ADR, offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) BNX2_WR(bp, BNX2_CTX_DATA, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) spin_unlock_irqrestore(&bp->indirect_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) #ifdef BCM_CNIC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) struct bnx2 *bp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) struct drv_ctl_io *io = &info->data.io;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) switch (info->cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) case DRV_CTL_IO_WR_CMD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) bnx2_reg_wr_ind(bp, io->offset, io->data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) case DRV_CTL_IO_RD_CMD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) io->data = bnx2_reg_rd_ind(bp, io->offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) case DRV_CTL_CTX_WR_CMD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) int sb_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) if (bp->flags & BNX2_FLAG_USING_MSIX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) bnapi->cnic_present = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) sb_id = bp->irq_nvecs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) bnapi->cnic_tag = bnapi->last_status_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) bnapi->cnic_present = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) sb_id = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) cp->irq_arr[0].status_blk = (void *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) ((unsigned long) bnapi->status_blk.msi +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) (BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) cp->irq_arr[0].status_blk_num = sb_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) cp->num_irq = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) struct bnx2 *bp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) if (!ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) if (cp->drv_state & CNIC_DRV_STATE_REGD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) if (!bnx2_reg_rd_ind(bp, BNX2_FW_MAX_ISCSI_CONN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) bp->cnic_data = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) rcu_assign_pointer(bp->cnic_ops, ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) cp->num_irq = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) cp->drv_state = CNIC_DRV_STATE_REGD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) bnx2_setup_cnic_irq_info(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) static int bnx2_unregister_cnic(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) struct bnx2 *bp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) mutex_lock(&bp->cnic_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) cp->drv_state = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) bnapi->cnic_present = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) RCU_INIT_POINTER(bp->cnic_ops, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) mutex_unlock(&bp->cnic_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) synchronize_rcu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) static struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) struct bnx2 *bp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) if (!cp->max_iscsi_conn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) cp->drv_owner = THIS_MODULE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) cp->chip_id = bp->chip_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) cp->pdev = bp->pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) cp->io_base = bp->regview;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) cp->drv_ctl = bnx2_drv_ctl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) cp->drv_register_cnic = bnx2_register_cnic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) cp->drv_unregister_cnic = bnx2_unregister_cnic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) return cp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) bnx2_cnic_stop(struct bnx2 *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) struct cnic_ops *c_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) struct cnic_ctl_info info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) mutex_lock(&bp->cnic_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) c_ops = rcu_dereference_protected(bp->cnic_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) lockdep_is_held(&bp->cnic_lock));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) if (c_ops) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) info.cmd = CNIC_CTL_STOP_CMD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) c_ops->cnic_ctl(bp->cnic_data, &info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) mutex_unlock(&bp->cnic_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) bnx2_cnic_start(struct bnx2 *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) struct cnic_ops *c_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) struct cnic_ctl_info info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) mutex_lock(&bp->cnic_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) c_ops = rcu_dereference_protected(bp->cnic_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) lockdep_is_held(&bp->cnic_lock));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) if (c_ops) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) bnapi->cnic_tag = bnapi->last_status_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) info.cmd = CNIC_CTL_START_CMD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) c_ops->cnic_ctl(bp->cnic_data, &info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) mutex_unlock(&bp->cnic_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) bnx2_cnic_stop(struct bnx2 *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) bnx2_cnic_start(struct bnx2 *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) u32 val1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) int i, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) udelay(40);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) val1 = (bp->phy_addr << 21) | (reg << 16) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) BNX2_EMAC_MDIO_COMM_START_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) BNX2_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) for (i = 0; i < 50; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) udelay(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) udelay(5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) val1 &= BNX2_EMAC_MDIO_COMM_DATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) *val = 0x0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) ret = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) *val = val1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) udelay(40);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) u32 val1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) int i, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) udelay(40);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) val1 = (bp->phy_addr << 21) | (reg << 16) | val |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) BNX2_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) for (i = 0; i < 50; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) udelay(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) udelay(5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) ret = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) udelay(40);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) bnx2_disable_int(struct bnx2 *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) struct bnx2_napi *bnapi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) for (i = 0; i < bp->irq_nvecs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) bnapi = &bp->bnx2_napi[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) bnx2_enable_int(struct bnx2 *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) struct bnx2_napi *bnapi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) for (i = 0; i < bp->irq_nvecs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) bnapi = &bp->bnx2_napi[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) bnapi->last_status_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) bnapi->last_status_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) bnx2_disable_int_sync(struct bnx2 *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) atomic_inc(&bp->intr_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) if (!netif_running(bp->dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) bnx2_disable_int(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) for (i = 0; i < bp->irq_nvecs; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) synchronize_irq(bp->irq_tbl[i].vector);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) bnx2_napi_disable(struct bnx2 *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) for (i = 0; i < bp->irq_nvecs; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) napi_disable(&bp->bnx2_napi[i].napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) bnx2_napi_enable(struct bnx2 *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) for (i = 0; i < bp->irq_nvecs; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) napi_enable(&bp->bnx2_napi[i].napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) bnx2_netif_stop(struct bnx2 *bp, bool stop_cnic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) if (stop_cnic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) bnx2_cnic_stop(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) if (netif_running(bp->dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) bnx2_napi_disable(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) netif_tx_disable(bp->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) bnx2_disable_int_sync(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) netif_carrier_off(bp->dev); /* prevent tx timeout */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) bnx2_netif_start(struct bnx2 *bp, bool start_cnic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) if (atomic_dec_and_test(&bp->intr_sem)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) if (netif_running(bp->dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) netif_tx_wake_all_queues(bp->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) spin_lock_bh(&bp->phy_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) if (bp->link_up)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) netif_carrier_on(bp->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) spin_unlock_bh(&bp->phy_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) bnx2_napi_enable(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) bnx2_enable_int(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) if (start_cnic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) bnx2_cnic_start(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) bnx2_free_tx_mem(struct bnx2 *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) for (i = 0; i < bp->num_tx_rings; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) if (txr->tx_desc_ring) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) dma_free_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) txr->tx_desc_ring,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) txr->tx_desc_mapping);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) txr->tx_desc_ring = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) kfree(txr->tx_buf_ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) txr->tx_buf_ring = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) bnx2_free_rx_mem(struct bnx2 *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) for (i = 0; i < bp->num_rx_rings; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) int j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) for (j = 0; j < bp->rx_max_ring; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) if (rxr->rx_desc_ring[j])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) rxr->rx_desc_ring[j],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) rxr->rx_desc_mapping[j]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) rxr->rx_desc_ring[j] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) vfree(rxr->rx_buf_ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) rxr->rx_buf_ring = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) for (j = 0; j < bp->rx_max_pg_ring; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) if (rxr->rx_pg_desc_ring[j])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) rxr->rx_pg_desc_ring[j],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) rxr->rx_pg_desc_mapping[j]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) rxr->rx_pg_desc_ring[j] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) vfree(rxr->rx_pg_ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) rxr->rx_pg_ring = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) bnx2_alloc_tx_mem(struct bnx2 *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) for (i = 0; i < bp->num_tx_rings; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) if (!txr->tx_buf_ring)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) txr->tx_desc_ring =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) dma_alloc_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) &txr->tx_desc_mapping, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) if (!txr->tx_desc_ring)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) bnx2_alloc_rx_mem(struct bnx2 *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) for (i = 0; i < bp->num_rx_rings; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) int j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) rxr->rx_buf_ring =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) vzalloc(array_size(SW_RXBD_RING_SIZE, bp->rx_max_ring));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) if (!rxr->rx_buf_ring)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) for (j = 0; j < bp->rx_max_ring; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) rxr->rx_desc_ring[j] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) dma_alloc_coherent(&bp->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) RXBD_RING_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) &rxr->rx_desc_mapping[j],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) if (!rxr->rx_desc_ring[j])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) if (bp->rx_pg_ring_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) rxr->rx_pg_ring =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) vzalloc(array_size(SW_RXPG_RING_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) bp->rx_max_pg_ring));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) if (!rxr->rx_pg_ring)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) for (j = 0; j < bp->rx_max_pg_ring; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) rxr->rx_pg_desc_ring[j] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) dma_alloc_coherent(&bp->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) RXBD_RING_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) &rxr->rx_pg_desc_mapping[j],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) if (!rxr->rx_pg_desc_ring[j])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) bnx2_free_stats_blk(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) struct bnx2 *bp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) if (bp->status_blk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) dma_free_coherent(&bp->pdev->dev, bp->status_stats_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) bp->status_blk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) bp->status_blk_mapping);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) bp->status_blk = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) bp->stats_blk = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) bnx2_alloc_stats_blk(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) int status_blk_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) void *status_blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) struct bnx2 *bp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) /* Combine status and statistics blocks into one allocation. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) if (bp->flags & BNX2_FLAG_MSIX_CAP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) BNX2_SBLK_MSIX_ALIGN_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) bp->status_stats_size = status_blk_size +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) sizeof(struct statistics_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) status_blk = dma_alloc_coherent(&bp->pdev->dev, bp->status_stats_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) &bp->status_blk_mapping, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) if (!status_blk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) bp->status_blk = status_blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) bp->stats_blk = status_blk + status_blk_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) bnx2_free_mem(struct bnx2 *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) bnx2_free_tx_mem(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) bnx2_free_rx_mem(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) for (i = 0; i < bp->ctx_pages; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) if (bp->ctx_blk[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) dma_free_coherent(&bp->pdev->dev, BNX2_PAGE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) bp->ctx_blk[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) bp->ctx_blk_mapping[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) bp->ctx_blk[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) if (bnapi->status_blk.msi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) bnapi->status_blk.msi = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) bnx2_alloc_mem(struct bnx2 *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) int i, err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) struct bnx2_napi *bnapi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) bnapi = &bp->bnx2_napi[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) bnapi->status_blk.msi = bp->status_blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) bnapi->hw_tx_cons_ptr =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) &bnapi->status_blk.msi->status_tx_quick_consumer_index0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) bnapi->hw_rx_cons_ptr =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) &bnapi->status_blk.msi->status_rx_quick_consumer_index0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) if (bp->flags & BNX2_FLAG_MSIX_CAP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) for (i = 1; i < bp->irq_nvecs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) struct status_block_msix *sblk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) bnapi = &bp->bnx2_napi[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) sblk = (bp->status_blk + BNX2_SBLK_MSIX_ALIGN_SIZE * i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) bnapi->status_blk.msix = sblk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) bnapi->hw_tx_cons_ptr =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) &sblk->status_tx_quick_consumer_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) bnapi->hw_rx_cons_ptr =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) &sblk->status_rx_quick_consumer_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) bnapi->int_num = i << 24;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) bp->ctx_pages = 0x2000 / BNX2_PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) if (bp->ctx_pages == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) bp->ctx_pages = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) for (i = 0; i < bp->ctx_pages; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) bp->ctx_blk[i] = dma_alloc_coherent(&bp->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) BNX2_PAGE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) &bp->ctx_blk_mapping[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) if (!bp->ctx_blk[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) goto alloc_mem_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) err = bnx2_alloc_rx_mem(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) goto alloc_mem_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) err = bnx2_alloc_tx_mem(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) goto alloc_mem_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) alloc_mem_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) bnx2_free_mem(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) bnx2_report_fw_link(struct bnx2 *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) u32 fw_link_status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) if (bp->link_up) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) u32 bmsr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) switch (bp->line_speed) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) case SPEED_10:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) if (bp->duplex == DUPLEX_HALF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) fw_link_status = BNX2_LINK_STATUS_10HALF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) fw_link_status = BNX2_LINK_STATUS_10FULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) case SPEED_100:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) if (bp->duplex == DUPLEX_HALF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) fw_link_status = BNX2_LINK_STATUS_100HALF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) fw_link_status = BNX2_LINK_STATUS_100FULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) case SPEED_1000:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) if (bp->duplex == DUPLEX_HALF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) fw_link_status = BNX2_LINK_STATUS_1000HALF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) fw_link_status = BNX2_LINK_STATUS_1000FULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) case SPEED_2500:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) if (bp->duplex == DUPLEX_HALF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) fw_link_status = BNX2_LINK_STATUS_2500HALF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) fw_link_status = BNX2_LINK_STATUS_2500FULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) if (bp->autoneg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) if (!(bmsr & BMSR_ANEGCOMPLETE) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) static char *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) bnx2_xceiver_str(struct bnx2 *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) return (bp->phy_port == PORT_FIBRE) ? "SerDes" :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) "Copper");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) bnx2_report_link(struct bnx2 *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) if (bp->link_up) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) netif_carrier_on(bp->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) netdev_info(bp->dev, "NIC %s Link is Up, %d Mbps %s duplex",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) bnx2_xceiver_str(bp),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) bp->line_speed,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) bp->duplex == DUPLEX_FULL ? "full" : "half");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) if (bp->flow_ctrl) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) if (bp->flow_ctrl & FLOW_CTRL_RX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) pr_cont(", receive ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) if (bp->flow_ctrl & FLOW_CTRL_TX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) pr_cont("& transmit ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) pr_cont(", transmit ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) pr_cont("flow control ON");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) pr_cont("\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) netif_carrier_off(bp->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) netdev_err(bp->dev, "NIC %s Link is Down\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) bnx2_xceiver_str(bp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) bnx2_report_fw_link(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) bnx2_resolve_flow_ctrl(struct bnx2 *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) u32 local_adv, remote_adv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) bp->flow_ctrl = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) if (bp->duplex == DUPLEX_FULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) bp->flow_ctrl = bp->req_flow_ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) if (bp->duplex != DUPLEX_FULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) (BNX2_CHIP(bp) == BNX2_CHIP_5708)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) bp->flow_ctrl |= FLOW_CTRL_TX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) bp->flow_ctrl |= FLOW_CTRL_RX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) bnx2_read_phy(bp, bp->mii_adv, &local_adv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) u32 new_local_adv = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) u32 new_remote_adv = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) if (local_adv & ADVERTISE_1000XPAUSE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) new_local_adv |= ADVERTISE_PAUSE_CAP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) if (local_adv & ADVERTISE_1000XPSE_ASYM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) new_local_adv |= ADVERTISE_PAUSE_ASYM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) if (remote_adv & ADVERTISE_1000XPAUSE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) new_remote_adv |= ADVERTISE_PAUSE_CAP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) if (remote_adv & ADVERTISE_1000XPSE_ASYM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) new_remote_adv |= ADVERTISE_PAUSE_ASYM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) local_adv = new_local_adv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) remote_adv = new_remote_adv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) /* See Table 28B-3 of 802.3ab-1999 spec. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) if (local_adv & ADVERTISE_PAUSE_CAP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) if(local_adv & ADVERTISE_PAUSE_ASYM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) if (remote_adv & ADVERTISE_PAUSE_CAP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) bp->flow_ctrl = FLOW_CTRL_RX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) if (remote_adv & ADVERTISE_PAUSE_CAP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) else if (local_adv & ADVERTISE_PAUSE_ASYM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) (remote_adv & ADVERTISE_PAUSE_ASYM)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) bp->flow_ctrl = FLOW_CTRL_TX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) bnx2_5709s_linkup(struct bnx2 *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) u32 val, speed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) bp->link_up = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) if ((bp->autoneg & AUTONEG_SPEED) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) bp->line_speed = bp->req_line_speed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) bp->duplex = bp->req_duplex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) switch (speed) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) case MII_BNX2_GP_TOP_AN_SPEED_10:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) bp->line_speed = SPEED_10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) case MII_BNX2_GP_TOP_AN_SPEED_100:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) bp->line_speed = SPEED_100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) case MII_BNX2_GP_TOP_AN_SPEED_1G:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) bp->line_speed = SPEED_1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) bp->line_speed = SPEED_2500;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) if (val & MII_BNX2_GP_TOP_AN_FD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) bp->duplex = DUPLEX_FULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) bp->duplex = DUPLEX_HALF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) bnx2_5708s_linkup(struct bnx2 *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) bp->link_up = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) case BCM5708S_1000X_STAT1_SPEED_10:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) bp->line_speed = SPEED_10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) case BCM5708S_1000X_STAT1_SPEED_100:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) bp->line_speed = SPEED_100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) case BCM5708S_1000X_STAT1_SPEED_1G:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) bp->line_speed = SPEED_1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) case BCM5708S_1000X_STAT1_SPEED_2G5:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) bp->line_speed = SPEED_2500;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) if (val & BCM5708S_1000X_STAT1_FD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) bp->duplex = DUPLEX_FULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) bp->duplex = DUPLEX_HALF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) bnx2_5706s_linkup(struct bnx2 *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) u32 bmcr, local_adv, remote_adv, common;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) bp->link_up = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) bp->line_speed = SPEED_1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) if (bmcr & BMCR_FULLDPLX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) bp->duplex = DUPLEX_FULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) bp->duplex = DUPLEX_HALF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) if (!(bmcr & BMCR_ANENABLE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) bnx2_read_phy(bp, bp->mii_adv, &local_adv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) common = local_adv & remote_adv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) if (common & ADVERTISE_1000XFULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) bp->duplex = DUPLEX_FULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) bp->duplex = DUPLEX_HALF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) bnx2_copper_linkup(struct bnx2 *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) u32 bmcr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) bp->phy_flags &= ~BNX2_PHY_FLAG_MDIX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) if (bmcr & BMCR_ANENABLE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) u32 local_adv, remote_adv, common;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) common = local_adv & (remote_adv >> 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) if (common & ADVERTISE_1000FULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) bp->line_speed = SPEED_1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) bp->duplex = DUPLEX_FULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) else if (common & ADVERTISE_1000HALF) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) bp->line_speed = SPEED_1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) bp->duplex = DUPLEX_HALF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) bnx2_read_phy(bp, bp->mii_adv, &local_adv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) common = local_adv & remote_adv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) if (common & ADVERTISE_100FULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) bp->line_speed = SPEED_100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) bp->duplex = DUPLEX_FULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) else if (common & ADVERTISE_100HALF) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) bp->line_speed = SPEED_100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) bp->duplex = DUPLEX_HALF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) else if (common & ADVERTISE_10FULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) bp->line_speed = SPEED_10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) bp->duplex = DUPLEX_FULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) else if (common & ADVERTISE_10HALF) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) bp->line_speed = SPEED_10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) bp->duplex = DUPLEX_HALF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) bp->line_speed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) bp->link_up = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) if (bmcr & BMCR_SPEED100) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) bp->line_speed = SPEED_100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) bp->line_speed = SPEED_10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) if (bmcr & BMCR_FULLDPLX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) bp->duplex = DUPLEX_FULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) bp->duplex = DUPLEX_HALF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) if (bp->link_up) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) u32 ext_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) bnx2_read_phy(bp, MII_BNX2_EXT_STATUS, &ext_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) if (ext_status & EXT_STATUS_MDIX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) bp->phy_flags |= BNX2_PHY_FLAG_MDIX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) u32 val, rx_cid_addr = GET_CID_ADDR(cid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) val |= 0x02 << 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) if (bp->flow_ctrl & FLOW_CTRL_TX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) val |= BNX2_L2CTX_FLOW_CTRL_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) bnx2_init_all_rx_contexts(struct bnx2 *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) u32 cid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) if (i == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) cid = RX_RSS_CID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) bnx2_init_rx_context(bp, cid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) bnx2_set_mac_link(struct bnx2 *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) BNX2_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) if (bp->link_up && (bp->line_speed == SPEED_1000) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) (bp->duplex == DUPLEX_HALF)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) BNX2_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) /* Configure the EMAC mode register. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) val = BNX2_RD(bp, BNX2_EMAC_MODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) BNX2_EMAC_MODE_25G_MODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) if (bp->link_up) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) switch (bp->line_speed) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) case SPEED_10:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) if (BNX2_CHIP(bp) != BNX2_CHIP_5706) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) val |= BNX2_EMAC_MODE_PORT_MII_10M;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) case SPEED_100:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) val |= BNX2_EMAC_MODE_PORT_MII;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) case SPEED_2500:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) val |= BNX2_EMAC_MODE_25G_MODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) case SPEED_1000:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) val |= BNX2_EMAC_MODE_PORT_GMII;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) val |= BNX2_EMAC_MODE_PORT_GMII;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) /* Set the MAC to operate in the appropriate duplex mode. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) if (bp->duplex == DUPLEX_HALF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) val |= BNX2_EMAC_MODE_HALF_DUPLEX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) BNX2_WR(bp, BNX2_EMAC_MODE, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) /* Enable/disable rx PAUSE. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) if (bp->flow_ctrl & FLOW_CTRL_RX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) BNX2_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) /* Enable/disable tx PAUSE. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) val = BNX2_RD(bp, BNX2_EMAC_TX_MODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) if (bp->flow_ctrl & FLOW_CTRL_TX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) val |= BNX2_EMAC_TX_MODE_FLOW_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) BNX2_WR(bp, BNX2_EMAC_TX_MODE, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) /* Acknowledge the interrupt. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) BNX2_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) bnx2_init_all_rx_contexts(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) bnx2_enable_bmsr1(struct bnx2 *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) (BNX2_CHIP(bp) == BNX2_CHIP_5709))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) MII_BNX2_BLK_ADDR_GP_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) bnx2_disable_bmsr1(struct bnx2 *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) (BNX2_CHIP(bp) == BNX2_CHIP_5709))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) bnx2_test_and_enable_2g5(struct bnx2 *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) u32 up1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) int ret = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) if (bp->autoneg & AUTONEG_SPEED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) bp->advertising |= ADVERTISED_2500baseX_Full;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) bnx2_read_phy(bp, bp->mii_up1, &up1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) if (!(up1 & BCM5708S_UP1_2G5)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) up1 |= BCM5708S_UP1_2G5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) bnx2_write_phy(bp, bp->mii_up1, up1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) bnx2_test_and_disable_2g5(struct bnx2 *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) u32 up1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) bnx2_read_phy(bp, bp->mii_up1, &up1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) if (up1 & BCM5708S_UP1_2G5) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) up1 &= ~BCM5708S_UP1_2G5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) bnx2_write_phy(bp, bp->mii_up1, up1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) ret = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454)
/* Force the SerDes PHY to a fixed 2.5G speed (autoneg off).
 *
 * Chip-specific: the 5709 programs the SERDES_DIG MISC1 register through
 * the banked block-address scheme; the 5708 sets a force bit directly in
 * its BMCR.  Other chips are not 2.5G-capable and return early.
 */
static void
bnx2_enable_forced_2g5(struct bnx2 *bp)
{
	u32 bmcr;
	int err;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		u32 val;

		/* Select the SERDES_DIG bank to reach MISC1. */
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
			/* Clear the forced-speed field, then force 2.5G. */
			val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
			val |= MII_BNX2_SD_MISC1_FORCE |
				MII_BNX2_SD_MISC1_FORCE_2_5G;
			bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
		}

		/* Restore the default bank before touching BMCR. */
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (!err)
			bmcr |= BCM5708S_BMCR_FORCE_2500;
	} else {
		return;
	}

	if (err)
		return;

	/* Forcing a speed implies autoneg off; honor the requested duplex. */
	if (bp->autoneg & AUTONEG_SPEED) {
		bmcr &= ~BMCR_ANENABLE;
		if (bp->req_duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
	}
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498)
/* Undo a forced 2.5G setting and, when autoneg is requested, restart
 * autonegotiation at 1G.  Mirror image of bnx2_enable_forced_2g5() with
 * the same 5709/5708 chip split.
 */
static void
bnx2_disable_forced_2g5(struct bnx2 *bp)
{
	u32 bmcr;
	int err;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		u32 val;

		/* Select the SERDES_DIG bank to reach MISC1. */
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
			/* Drop only the FORCE bit; speed field is ignored
			 * once forcing is off.
			 */
			val &= ~MII_BNX2_SD_MISC1_FORCE;
			bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
		}

		/* Restore the default bank before touching BMCR. */
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (!err)
			bmcr &= ~BCM5708S_BMCR_FORCE_2500;
	} else {
		return;
	}

	if (err)
		return;

	/* Re-enable and restart autoneg at 1G if autoneg was requested. */
	if (bp->autoneg & AUTONEG_SPEED)
		bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) if (start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550)
/* Re-evaluate PHY link state and program the MAC accordingly.
 *
 * Reads link status from the PHY (with chip-specific workarounds for the
 * 5706 SerDes), updates bp->link_up, resolves flow control on link-up,
 * reports a state change, and finally reprograms the MAC.  Returns 0.
 * Caller is expected to hold bp->phy_lock.
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	/* In loopback modes the link is up by definition. */
	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	/* Remote-PHY firmware owns the link; nothing for us to do. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return 0;

	link_up = bp->link_up;

	/* BMSR is latched-low; read twice to get the current status. */
	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (BNX2_CHIP(bp) == BNX2_CHIP_5706)) {
		u32 val, an_dbg;

		/* Release any previously forced-down 5706 SerDes link. */
		if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
			bnx2_5706s_force_link_dn(bp, 0);
			bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
		}
		val = BNX2_RD(bp, BNX2_EMAC_STATUS);

		/* Shadow register is latched too; read twice. */
		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

		/* Trust the EMAC link bit + autoneg sync instead of BMSR
		 * on the 5706 SerDes.
		 */
		if ((val & BNX2_EMAC_STATUS_LINK) &&
		    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		/* Per-chip link-up fixups (speed/duplex decode etc.). */
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
				bnx2_5706s_linkup(bp);
			else if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
				bnx2_5708s_linkup(bp);
			else if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		/* Link down: drop any forced 2.5G so autoneg can recover. */
		if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		/* Leave parallel-detect mode and re-enable autoneg. */
		if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
		bp->link_up = 0;
	}

	/* Only log/report when the state actually changed. */
	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) bnx2_reset_phy(struct bnx2 *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) u32 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) #define PHY_RESET_MAX_WAIT 100
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) udelay(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) bnx2_read_phy(bp, bp->mii_bmcr, ®);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) if (!(reg & BMCR_RESET)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) udelay(20);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) if (i == PHY_RESET_MAX_WAIT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) static u32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) bnx2_phy_get_pause_adv(struct bnx2 *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) u32 adv = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) adv = ADVERTISE_1000XPAUSE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) adv = ADVERTISE_PAUSE_CAP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) adv = ADVERTISE_1000XPSE_ASYM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) adv = ADVERTISE_PAUSE_ASYM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) return adv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) static int bnx2_fw_sync(struct bnx2 *, u32, int, int);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694)
/* Program link settings on a firmware-managed (remote) PHY.
 *
 * Encodes the requested autoneg/speed/duplex/pause configuration into a
 * netlink-style argument word, writes it to shared memory, and asks the
 * bootcode to apply it via bnx2_fw_sync().  The phy_lock is dropped around
 * the firmware handshake (see the sparse annotations).  Returns 0.
 */
static int
bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 speed_arg = 0, pause_adv;

	pause_adv = bnx2_phy_get_pause_adv(bp);

	if (bp->autoneg & AUTONEG_SPEED) {
		/* Autoneg: advertise every speed the user enabled. */
		speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
		if (bp->advertising & ADVERTISED_10baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		if (bp->advertising & ADVERTISED_2500baseX_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
	} else {
		/* Forced speed: encode exactly one speed/duplex choice.
		 * 1G and 2.5G are full-duplex only.
		 */
		if (bp->req_line_speed == SPEED_2500)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
		else if (bp->req_line_speed == SPEED_1000)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		else if (bp->req_line_speed == SPEED_100) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		} else if (bp->req_line_speed == SPEED_10) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		}
	}

	if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
	if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;

	/* Copper ports also get wirespeed handling from the firmware. */
	if (port == PORT_TP)
		speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
			     BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;

	bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);

	/* bnx2_fw_sync() sleeps; cannot hold phy_lock across it. */
	spin_unlock_bh(&bp->phy_lock);
	bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
	spin_lock_bh(&bp->phy_lock);

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) __releases(&bp->phy_lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) __acquires(&bp->phy_lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) u32 adv, bmcr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) u32 new_adv = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) return bnx2_setup_remote_phy(bp, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) if (!(bp->autoneg & AUTONEG_SPEED)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) u32 new_bmcr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) int force_link_down = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) if (bp->req_line_speed == SPEED_2500) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) if (!bnx2_test_and_enable_2g5(bp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) force_link_down = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) } else if (bp->req_line_speed == SPEED_1000) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) if (bnx2_test_and_disable_2g5(bp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) force_link_down = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) bnx2_read_phy(bp, bp->mii_adv, &adv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) new_bmcr = bmcr & ~BMCR_ANENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) new_bmcr |= BMCR_SPEED1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) if (bp->req_line_speed == SPEED_2500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) bnx2_enable_forced_2g5(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) else if (bp->req_line_speed == SPEED_1000) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) bnx2_disable_forced_2g5(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) new_bmcr &= ~0x2000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) } else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) if (bp->req_line_speed == SPEED_2500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) new_bmcr |= BCM5708S_BMCR_FORCE_2500;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) if (bp->req_duplex == DUPLEX_FULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) adv |= ADVERTISE_1000XFULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) new_bmcr |= BMCR_FULLDPLX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) adv |= ADVERTISE_1000XHALF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) new_bmcr &= ~BMCR_FULLDPLX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) if ((new_bmcr != bmcr) || (force_link_down)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) /* Force a link down visible on the other side */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) if (bp->link_up) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) bnx2_write_phy(bp, bp->mii_adv, adv &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) ~(ADVERTISE_1000XFULL |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) ADVERTISE_1000XHALF));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) BMCR_ANRESTART | BMCR_ANENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) bp->link_up = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) netif_carrier_off(bp->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) bnx2_report_link(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) bnx2_write_phy(bp, bp->mii_adv, adv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) bnx2_resolve_flow_ctrl(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) bnx2_set_mac_link(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) bnx2_test_and_enable_2g5(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) if (bp->advertising & ADVERTISED_1000baseT_Full)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) new_adv |= ADVERTISE_1000XFULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) new_adv |= bnx2_phy_get_pause_adv(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) bnx2_read_phy(bp, bp->mii_adv, &adv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) bp->serdes_an_pending = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) /* Force a link down visible on the other side */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) if (bp->link_up) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) spin_unlock_bh(&bp->phy_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) msleep(20);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) spin_lock_bh(&bp->phy_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) bnx2_write_phy(bp, bp->mii_adv, new_adv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) bp->serdes_an_pending = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) mod_timer(&bp->timer, jiffies + bp->current_interval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) bnx2_resolve_flow_ctrl(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) bnx2_set_mac_link(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870)
/* ethtool advertising mask for fibre ports: include 2.5G only when the
 * PHY is 2.5G-capable.  NOTE: this macro expands to an expression that
 * reads a variable named "bp" — it is only usable in scopes where a
 * struct bnx2 *bp is in scope.
 */
#define ETHTOOL_ALL_FIBRE_SPEED						\
	(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?			\
		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
		(ADVERTISED_1000baseT_Full)

/* ethtool advertising mask for copper ports: all 10/100/1000 modes. */
#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

/* MII advertisement-register (ANAR) bits for all 10/100 modes + CSMA. */
#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

/* MII 1000BASE-T control-register bits for both 1G duplex modes. */
#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) bnx2_set_default_remote_link(struct bnx2 *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) u32 link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) if (bp->phy_port == PORT_TP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) bp->req_line_speed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) bp->autoneg |= AUTONEG_SPEED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) bp->advertising = ADVERTISED_Autoneg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) bp->advertising |= ADVERTISED_10baseT_Half;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) bp->advertising |= ADVERTISED_10baseT_Full;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) bp->advertising |= ADVERTISED_100baseT_Half;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) bp->advertising |= ADVERTISED_100baseT_Full;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) bp->advertising |= ADVERTISED_1000baseT_Full;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) bp->advertising |= ADVERTISED_2500baseX_Full;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) bp->autoneg = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) bp->advertising = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) bp->req_duplex = DUPLEX_FULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) bp->req_line_speed = SPEED_10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) bp->req_duplex = DUPLEX_HALF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) bp->req_line_speed = SPEED_100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) bp->req_duplex = DUPLEX_HALF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) bp->req_line_speed = SPEED_1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) bp->req_line_speed = SPEED_2500;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) bnx2_set_default_link(struct bnx2 *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) bnx2_set_default_remote_link(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) bp->req_line_speed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) u32 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) bp->autoneg = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) bp->req_line_speed = bp->line_speed = SPEED_1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) bp->req_duplex = DUPLEX_FULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) bnx2_send_heart_beat(struct bnx2 *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) u32 msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) u32 addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) spin_lock(&bp->indirect_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) spin_unlock(&bp->indirect_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972)
/* Handle a link event reported by the remote-PHY firmware.
 *
 * Reads the BNX2_LINK_STATUS word from shared memory and mirrors it
 * into driver state: link up/down, line speed, duplex, flow control
 * and the active port type.  Reports a link change to the stack and
 * reprograms the MAC.  NOTE(review): presumably called with
 * bp->phy_lock held like the other link paths — confirm at call sites.
 */
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
	u32 msg;
	u8 link_up = bp->link_up;	/* previous state, to detect a change */
	u8 old_port;

	msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);

	/* Firmware flags a missed heartbeat in the status word; answer
	 * it, then strip the flag before decoding the rest.
	 */
	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
		bnx2_send_heart_beat(bp);

	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
		bp->link_up = 0;
	else {
		u32 speed;

		bp->link_up = 1;
		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		/* Full duplex is the default; the half-duplex cases below
		 * override it and then fall through to set the speed.
		 */
		bp->duplex = DUPLEX_FULL;
		switch (speed) {
		case BNX2_LINK_STATUS_10HALF:
			bp->duplex = DUPLEX_HALF;
			fallthrough;
		case BNX2_LINK_STATUS_10FULL:
			bp->line_speed = SPEED_10;
			break;
		case BNX2_LINK_STATUS_100HALF:
			bp->duplex = DUPLEX_HALF;
			fallthrough;
		case BNX2_LINK_STATUS_100BASE_T4:
		case BNX2_LINK_STATUS_100FULL:
			bp->line_speed = SPEED_100;
			break;
		case BNX2_LINK_STATUS_1000HALF:
			bp->duplex = DUPLEX_HALF;
			fallthrough;
		case BNX2_LINK_STATUS_1000FULL:
			bp->line_speed = SPEED_1000;
			break;
		case BNX2_LINK_STATUS_2500HALF:
			bp->duplex = DUPLEX_HALF;
			fallthrough;
		case BNX2_LINK_STATUS_2500FULL:
			bp->line_speed = SPEED_2500;
			break;
		default:
			bp->line_speed = 0;
			break;
		}

		/* Flow control: if speed and flow-control autoneg are not
		 * both enabled, use the configured (forced) setting on
		 * full duplex; otherwise take the negotiated result from
		 * the firmware status word.
		 */
		bp->flow_ctrl = 0;
		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
		} else {
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;
		}

		/* The media type may change under remote-PHY control; if
		 * it did, re-derive the default link settings for the new
		 * port type.
		 */
		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);

	}
	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) bnx2_set_remote_link(struct bnx2 *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) u32 evt_code;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) switch (evt_code) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) case BNX2_FW_EVT_CODE_LINK_EVENT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) bnx2_remote_phy_event(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) bnx2_send_heart_beat(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071)
/* Program the copper PHY according to the current request state.
 *
 * Autoneg path: rewrite the 10/100 and 1000BASE-T advertisement
 * registers (plus pause bits) and restart autoneg only if something
 * actually changed.  Forced path: advertise nothing and write BMCR
 * with the forced speed/duplex, bouncing the link if it was up so the
 * partner notices.  Temporarily drops bp->phy_lock around msleep(), as
 * annotated by __releases/__acquires.  Always returns 0.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 bmcr, adv_reg, new_adv = 0;
	u32 new_bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	/* Keep only the speed and pause bits of the current advertisement
	 * so it can be compared against the desired value.
	 */
	bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
	adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
		    ADVERTISE_PAUSE_ASYM);

	new_adv = ADVERTISE_CSMA | ethtool_adv_to_mii_adv_t(bp->advertising);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv1000_reg;
		u32 new_adv1000 = 0;

		new_adv |= bnx2_phy_get_pause_adv(bp);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		new_adv1000 |= ethtool_adv_to_mii_ctrl1000_t(bp->advertising);
		/* Only rewrite and restart autoneg when the advertisement
		 * changed or autoneg is currently disabled.
		 */
		if ((adv1000_reg != new_adv1000) ||
			(adv_reg != new_adv) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, bp->mii_adv, new_adv);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* advertise nothing when forcing speed */
	if (adv_reg != new_adv)
		bnx2_write_phy(bp, bp->mii_adv, new_adv);

	/* Build the forced BMCR value (10M half is the all-zero default). */
	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		/* BMSR is read twice: link status is latched-low, so the
		 * first read clears a stale latch and the second reads the
		 * current state.
		 */
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		}

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) bnx2_setup_phy(struct bnx2 *bp, u8 port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) __releases(&bp->phy_lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) __acquires(&bp->phy_lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) if (bp->loopback == MAC_LOOPBACK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) return bnx2_setup_serdes_phy(bp, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) return bnx2_setup_copper_phy(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179)
/* Initialize the 5709 SerDes PHY.
 *
 * On the 5709S the standard IEEE MII registers live at a 0x10 offset
 * inside the PHY's register blocks, so the driver's shadow register
 * offsets are redirected first.  The remaining accesses select register
 * blocks through MII_BNX2_BLK_ADDR and program them in a fixed order:
 * AN MMD selection, optional reset, fiber mode, 2.5G advertisement per
 * capability flag, and BAM/next-page autoneg controls.  Returns 0.
 */
static int
bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	/* IEEE-standard registers are offset by 0x10 on this PHY. */
	bp->mii_bmcr = MII_BMCR + 0x10;
	bp->mii_bmsr = MII_BMSR + 0x10;
	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
	bp->mii_adv = MII_ADVERTISE + 0x10;
	bp->mii_lpa = MII_LPA + 0x10;
	bp->mii_up1 = MII_BNX2_OVER1G_UP1;

	/* Select the autoneg MMD through the AER block. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
	if (reset_phy)
		bnx2_reset_phy(bp);

	/* SerDes digital block: clear auto-detect, force fiber mode. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
	val |= MII_BNX2_SD_1000XCTL1_FIBER;
	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

	/* Advertise 2.5G only when the PHY is flagged as capable. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
		val |= BCM5708S_UP1_2G5;
	else
		val &= ~BCM5708S_UP1_2G5;
	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

	/* Enable BAM next-page exchange in the next-page block. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

	/* Clause-73 BAM controls. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
		MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

	/* Leave the block address pointing at the combo IEEE block so
	 * subsequent mii_* accesses hit the expected registers.
	 */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229)
/* Initialize the 5708 SerDes PHY.
 *
 * Programs the PHY's register blocks via BCM5708S_BLK_ADDR: IEEE mode,
 * fiber mode with auto-detect, PLL early-link detect, optional 2.5G
 * advertisement, a TX-amplitude tweak for early 5708 chip revisions,
 * and an NVRAM-supplied TX control value for backplane boards.
 * Always returns 0.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	/* On the 5708S the over-1G advertisement register is UP1. */
	bp->mii_up1 = BCM5708S_UP1;

	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	/* Fiber mode with auto-detect enabled. */
	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	/* Advertise 2.5G when the PHY supports it. */
	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	/* Early 5708 revisions (A0/B0/B1) need a stronger TX signal. */
	if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0) ||
	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B0) ||
	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	/* Apply a board-specific TX control value from shared memory,
	 * but only on backplane designs.
	 */
	val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
		BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) if (reset_phy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) bnx2_reset_phy(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) BNX2_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) if (bp->dev->mtu > ETH_DATA_LEN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) /* Set extended packet length bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) bnx2_write_phy(bp, 0x18, 0x7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) bnx2_read_phy(bp, 0x18, &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) bnx2_write_phy(bp, 0x1c, 0x6c00);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) bnx2_read_phy(bp, 0x1c, &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) bnx2_write_phy(bp, 0x18, 0x7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) bnx2_read_phy(bp, 0x18, &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) bnx2_write_phy(bp, 0x18, val & ~0x4007);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) bnx2_write_phy(bp, 0x1c, 0x6c00);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) bnx2_read_phy(bp, 0x1c, &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) if (reset_phy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) bnx2_reset_phy(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) bnx2_write_phy(bp, 0x18, 0x0c00);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) bnx2_write_phy(bp, 0x17, 0x000a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) bnx2_write_phy(bp, 0x15, 0x310b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) bnx2_write_phy(bp, 0x17, 0x201f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) bnx2_write_phy(bp, 0x15, 0x9506);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) bnx2_write_phy(bp, 0x17, 0x401f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) bnx2_write_phy(bp, 0x15, 0x14e2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) bnx2_write_phy(bp, 0x18, 0x0400);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) MII_BNX2_DSP_EXPAND_REG | 0x8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) val &= ~(1 << 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) if (bp->dev->mtu > ETH_DATA_LEN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) /* Set extended packet length bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) bnx2_write_phy(bp, 0x18, 0x7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) bnx2_read_phy(bp, 0x18, &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) bnx2_write_phy(bp, 0x18, val | 0x4000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) bnx2_read_phy(bp, 0x10, &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) bnx2_write_phy(bp, 0x10, val | 0x1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) bnx2_write_phy(bp, 0x18, 0x7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) bnx2_read_phy(bp, 0x18, &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) bnx2_write_phy(bp, 0x18, val & ~0x4007);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) bnx2_read_phy(bp, 0x10, &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) bnx2_write_phy(bp, 0x10, val & ~0x1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) /* ethernet@wirespeed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) bnx2_write_phy(bp, MII_BNX2_AUX_CTL, AUX_CTL_MISC_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) bnx2_read_phy(bp, MII_BNX2_AUX_CTL, &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) val |= AUX_CTL_MISC_CTL_WR | AUX_CTL_MISC_CTL_WIRESPEED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) /* auto-mdix */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) val |= AUX_CTL_MISC_CTL_AUTOMDIX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) bnx2_write_phy(bp, MII_BNX2_AUX_CTL, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) bnx2_init_phy(struct bnx2 *bp, int reset_phy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) __releases(&bp->phy_lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) __acquires(&bp->phy_lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) bp->mii_bmcr = MII_BMCR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) bp->mii_bmsr = MII_BMSR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) bp->mii_bmsr1 = MII_BMSR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) bp->mii_adv = MII_ADVERTISE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) bp->mii_lpa = MII_LPA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) BNX2_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) goto setup_phy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) bnx2_read_phy(bp, MII_PHYSID1, &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) bp->phy_id = val << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) bnx2_read_phy(bp, MII_PHYSID2, &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) bp->phy_id |= val & 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) rc = bnx2_init_5706s_phy(bp, reset_phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) else if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) rc = bnx2_init_5708s_phy(bp, reset_phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) else if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) rc = bnx2_init_5709s_phy(bp, reset_phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) rc = bnx2_init_copper_phy(bp, reset_phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) setup_phy:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) if (!rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) rc = bnx2_setup_phy(bp, bp->phy_port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) bnx2_set_mac_loopback(struct bnx2 *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) u32 mac_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) mac_mode = BNX2_RD(bp, BNX2_EMAC_MODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) mac_mode &= ~BNX2_EMAC_MODE_PORT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) BNX2_WR(bp, BNX2_EMAC_MODE, mac_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) bp->link_up = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) static int bnx2_test_link(struct bnx2 *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) bnx2_set_phy_loopback(struct bnx2 *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) u32 mac_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) int rc, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) spin_lock_bh(&bp->phy_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) BMCR_SPEED1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) spin_unlock_bh(&bp->phy_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) for (i = 0; i < 10; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) if (bnx2_test_link(bp) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) msleep(100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) mac_mode = BNX2_RD(bp, BNX2_EMAC_MODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) BNX2_EMAC_MODE_25G_MODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) BNX2_WR(bp, BNX2_EMAC_MODE, mac_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) bp->link_up = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) bnx2_dump_mcp_state(struct bnx2 *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) struct net_device *dev = bp->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) u32 mcp_p0, mcp_p1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) netdev_err(dev, "<--- start MCP states dump --->\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) mcp_p0 = BNX2_MCP_STATE_P0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) mcp_p1 = BNX2_MCP_STATE_P1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) mcp_p0 = BNX2_MCP_STATE_P0_5708;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) mcp_p1 = BNX2_MCP_STATE_P1_5708;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) netdev_err(dev, "DEBUG: MCP_STATE_P0[%08x] MCP_STATE_P1[%08x]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) bnx2_reg_rd_ind(bp, mcp_p0), bnx2_reg_rd_ind(bp, mcp_p1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) netdev_err(dev, "DEBUG: MCP mode[%08x] state[%08x] evt_mask[%08x]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_MODE),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_STATE),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_EVENT_MASK));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) netdev_err(dev, "DEBUG: pc[%08x] pc[%08x] instr[%08x]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_INSTRUCTION));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) netdev_err(dev, "DEBUG: shmem states:\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) netdev_err(dev, "DEBUG: drv_mb[%08x] fw_mb[%08x] link_status[%08x]",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) bnx2_shmem_rd(bp, BNX2_DRV_MB),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) bnx2_shmem_rd(bp, BNX2_FW_MB),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) bnx2_shmem_rd(bp, BNX2_LINK_STATUS));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) pr_cont(" drv_pulse_mb[%08x]\n", bnx2_shmem_rd(bp, BNX2_DRV_PULSE_MB));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) netdev_err(dev, "DEBUG: dev_info_signature[%08x] reset_type[%08x]",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) bnx2_shmem_rd(bp, BNX2_BC_STATE_RESET_TYPE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) pr_cont(" condition[%08x]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) DP_SHMEM_LINE(bp, BNX2_BC_RESET_TYPE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) DP_SHMEM_LINE(bp, 0x3cc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) DP_SHMEM_LINE(bp, 0x3dc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) DP_SHMEM_LINE(bp, 0x3ec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) netdev_err(dev, "DEBUG: 0x3fc[%08x]\n", bnx2_shmem_rd(bp, 0x3fc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) netdev_err(dev, "<--- end MCP states dump --->\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) bp->fw_wr_seq++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) msg_data |= bp->fw_wr_seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) bp->fw_last_msg = msg_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) if (!ack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) /* wait for an acknowledgement. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) msleep(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) val = bnx2_shmem_rd(bp, BNX2_FW_MB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) /* If we timed out, inform the firmware that this is the case. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) msg_data &= ~BNX2_DRV_MSG_CODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) if (!silent) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) pr_err("fw sync timeout, reset code = %x\n", msg_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) bnx2_dump_mcp_state(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) bnx2_init_5709_context(struct bnx2 *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) int i, ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) val |= (BNX2_PAGE_BITS - 8) << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) BNX2_WR(bp, BNX2_CTX_COMMAND, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) for (i = 0; i < 10; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) val = BNX2_RD(bp, BNX2_CTX_COMMAND);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) udelay(2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) if (val & BNX2_CTX_COMMAND_MEM_INIT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) for (i = 0; i < bp->ctx_pages; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) int j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) if (bp->ctx_blk[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) memset(bp->ctx_blk[i], 0, BNX2_PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) (bp->ctx_blk_mapping[i] & 0xffffffff) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) (u64) bp->ctx_blk_mapping[i] >> 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) for (j = 0; j < 10; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) val = BNX2_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) udelay(5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) ret = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) bnx2_init_context(struct bnx2 *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) u32 vcid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) vcid = 96;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) while (vcid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) u32 vcid_addr, pcid_addr, offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) vcid--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) u32 new_vcid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) vcid_addr = GET_PCID_ADDR(vcid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) if (vcid & 0x8) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) new_vcid = vcid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) pcid_addr = GET_PCID_ADDR(new_vcid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) vcid_addr = GET_CID_ADDR(vcid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) pcid_addr = vcid_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) vcid_addr += (i << PHY_CTX_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) pcid_addr += (i << PHY_CTX_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) BNX2_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) BNX2_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) /* Zero out the context. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) bnx2_ctx_wr(bp, vcid_addr, offset, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) bnx2_alloc_bad_rbuf(struct bnx2 *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) u16 *good_mbuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) u32 good_mbuf_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) good_mbuf = kmalloc_array(512, sizeof(u16), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) if (!good_mbuf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) good_mbuf_cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) /* Allocate a bunch of mbufs and save the good ones in an array. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) BNX2_RBUF_COMMAND_ALLOC_REQ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) /* The addresses with Bit 9 set are bad memory blocks. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) if (!(val & (1 << 9))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) good_mbuf[good_mbuf_cnt] = (u16) val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) good_mbuf_cnt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) /* Free the good ones back to the mbuf pool thus discarding
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) * all the bad ones. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) while (good_mbuf_cnt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) good_mbuf_cnt--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) val = good_mbuf[good_mbuf_cnt];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) val = (val << 9) | val | 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) kfree(good_mbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) val = (mac_addr[0] << 8) | mac_addr[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) BNX2_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) (mac_addr[4] << 8) | mac_addr[5];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) BNX2_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) static inline int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) dma_addr_t mapping;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) struct bnx2_sw_pg *rx_pg = &rxr->rx_pg_ring[index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) struct bnx2_rx_bd *rxbd =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) &rxr->rx_pg_desc_ring[BNX2_RX_RING(index)][BNX2_RX_IDX(index)];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) struct page *page = alloc_page(gfp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) if (!page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) mapping = dma_map_page(&bp->pdev->dev, page, 0, PAGE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) PCI_DMA_FROMDEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) if (dma_mapping_error(&bp->pdev->dev, mapping)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) __free_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) rx_pg->page = page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) dma_unmap_addr_set(rx_pg, mapping, mapping);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) struct bnx2_sw_pg *rx_pg = &rxr->rx_pg_ring[index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) struct page *page = rx_pg->page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) if (!page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(rx_pg, mapping),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) PAGE_SIZE, PCI_DMA_FROMDEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) __free_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) rx_pg->page = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) static inline int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) bnx2_alloc_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) u8 *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) struct bnx2_sw_bd *rx_buf = &rxr->rx_buf_ring[index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) dma_addr_t mapping;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) struct bnx2_rx_bd *rxbd =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) &rxr->rx_desc_ring[BNX2_RX_RING(index)][BNX2_RX_IDX(index)];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) data = kmalloc(bp->rx_buf_size, gfp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) if (!data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) mapping = dma_map_single(&bp->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) get_l2_fhdr(data),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) bp->rx_buf_use_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) PCI_DMA_FROMDEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) if (dma_mapping_error(&bp->pdev->dev, mapping)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) kfree(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) rx_buf->data = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) dma_unmap_addr_set(rx_buf, mapping, mapping);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) rxr->rx_prod_bseq += bp->rx_buf_use_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) struct status_block *sblk = bnapi->status_blk.msi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) u32 new_link_state, old_link_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) int is_set = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) new_link_state = sblk->status_attn_bits & event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) old_link_state = sblk->status_attn_bits_ack & event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) if (new_link_state != old_link_state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) if (new_link_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) BNX2_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) BNX2_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) is_set = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) return is_set;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) spin_lock(&bp->phy_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) bnx2_set_link(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) bnx2_set_remote_link(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) spin_unlock(&bp->phy_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) static inline u16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) u16 cons;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) cons = READ_ONCE(*bnapi->hw_tx_cons_ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) if (unlikely((cons & BNX2_MAX_TX_DESC_CNT) == BNX2_MAX_TX_DESC_CNT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) cons++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) return cons;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) u16 hw_cons, sw_cons, sw_ring_cons;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) int tx_pkt = 0, index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) unsigned int tx_bytes = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) struct netdev_queue *txq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) index = (bnapi - bp->bnx2_napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) txq = netdev_get_tx_queue(bp->dev, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) hw_cons = bnx2_get_hw_tx_cons(bnapi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) sw_cons = txr->tx_cons;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) while (sw_cons != hw_cons) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) struct bnx2_sw_tx_bd *tx_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) int i, last;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) sw_ring_cons = BNX2_TX_RING_IDX(sw_cons);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) tx_buf = &txr->tx_buf_ring[sw_ring_cons];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) skb = tx_buf->skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) /* prefetch skb_end_pointer() to speedup skb_shinfo(skb) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) prefetch(&skb->end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) /* partial BD completions possible with TSO packets */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) if (tx_buf->is_gso) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) u16 last_idx, last_ring_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) last_idx = sw_cons + tx_buf->nr_frags + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) if (unlikely(last_ring_idx >= BNX2_MAX_TX_DESC_CNT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) last_idx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) skb_headlen(skb), PCI_DMA_TODEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) tx_buf->skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) last = tx_buf->nr_frags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) for (i = 0; i < last; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) struct bnx2_sw_tx_bd *tx_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) sw_cons = BNX2_NEXT_TX_BD(sw_cons);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) tx_buf = &txr->tx_buf_ring[BNX2_TX_RING_IDX(sw_cons)];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) dma_unmap_page(&bp->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) dma_unmap_addr(tx_buf, mapping),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) skb_frag_size(&skb_shinfo(skb)->frags[i]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) PCI_DMA_TODEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) sw_cons = BNX2_NEXT_TX_BD(sw_cons);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) tx_bytes += skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) dev_kfree_skb_any(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) tx_pkt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) if (tx_pkt == budget)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) if (hw_cons == sw_cons)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) hw_cons = bnx2_get_hw_tx_cons(bnapi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) netdev_tx_completed_queue(txq, tx_pkt, tx_bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) txr->hw_tx_cons = hw_cons;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) txr->tx_cons = sw_cons;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) /* Need to make the tx_cons update visible to bnx2_start_xmit()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) * before checking for netif_tx_queue_stopped(). Without the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) * memory barrier, there is a small possibility that bnx2_start_xmit()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) * will miss it and cause the queue to be stopped forever.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922) smp_mb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) if (unlikely(netif_tx_queue_stopped(txq)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) __netif_tx_lock(txq, smp_processor_id());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927) if ((netif_tx_queue_stopped(txq)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928) (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929) netif_tx_wake_queue(txq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) __netif_tx_unlock(txq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) return tx_pkt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) struct sk_buff *skb, int count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) struct bnx2_sw_pg *cons_rx_pg, *prod_rx_pg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) struct bnx2_rx_bd *cons_bd, *prod_bd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) u16 hw_prod, prod;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) u16 cons = rxr->rx_pg_cons;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946) cons_rx_pg = &rxr->rx_pg_ring[cons];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) /* The caller was unable to allocate a new page to replace the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) * last one in the frags array, so we need to recycle that page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950) * and then free the skb.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) if (skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953) struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) struct skb_shared_info *shinfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) shinfo = skb_shinfo(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) shinfo->nr_frags--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958) page = skb_frag_page(&shinfo->frags[shinfo->nr_frags]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) __skb_frag_set_page(&shinfo->frags[shinfo->nr_frags], NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) cons_rx_pg->page = page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962) dev_kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) hw_prod = rxr->rx_pg_prod;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967) for (i = 0; i < count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) prod = BNX2_RX_PG_RING_IDX(hw_prod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) prod_rx_pg = &rxr->rx_pg_ring[prod];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971) cons_rx_pg = &rxr->rx_pg_ring[cons];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) cons_bd = &rxr->rx_pg_desc_ring[BNX2_RX_RING(cons)]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) [BNX2_RX_IDX(cons)];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) prod_bd = &rxr->rx_pg_desc_ring[BNX2_RX_RING(prod)]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975) [BNX2_RX_IDX(prod)];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) if (prod != cons) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) prod_rx_pg->page = cons_rx_pg->page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979) cons_rx_pg->page = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980) dma_unmap_addr_set(prod_rx_pg, mapping,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981) dma_unmap_addr(cons_rx_pg, mapping));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987) cons = BNX2_RX_PG_RING_IDX(BNX2_NEXT_RX_BD(cons));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988) hw_prod = BNX2_NEXT_RX_BD(hw_prod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990) rxr->rx_pg_prod = hw_prod;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991) rxr->rx_pg_cons = cons;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994) static inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) bnx2_reuse_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996) u8 *data, u16 cons, u16 prod)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998) struct bnx2_sw_bd *cons_rx_buf, *prod_rx_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999) struct bnx2_rx_bd *cons_bd, *prod_bd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001) cons_rx_buf = &rxr->rx_buf_ring[cons];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002) prod_rx_buf = &rxr->rx_buf_ring[prod];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004) dma_sync_single_for_device(&bp->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005) dma_unmap_addr(cons_rx_buf, mapping),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006) BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008) rxr->rx_prod_bseq += bp->rx_buf_use_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010) prod_rx_buf->data = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012) if (cons == prod)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015) dma_unmap_addr_set(prod_rx_buf, mapping,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016) dma_unmap_addr(cons_rx_buf, mapping));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) cons_bd = &rxr->rx_desc_ring[BNX2_RX_RING(cons)][BNX2_RX_IDX(cons)];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019) prod_bd = &rxr->rx_desc_ring[BNX2_RX_RING(prod)][BNX2_RX_IDX(prod)];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020) prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021) prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024) static struct sk_buff *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025) bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u8 *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026) unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027) u32 ring_idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030) u16 prod = ring_idx & 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033) err = bnx2_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034) if (unlikely(err)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035) bnx2_reuse_rx_data(bp, rxr, data, (u16) (ring_idx >> 16), prod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036) error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037) if (hdr_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038) unsigned int raw_len = len + 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039) int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041) bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046) dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047) PCI_DMA_FROMDEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048) skb = build_skb(data, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049) if (!skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050) kfree(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053) skb_reserve(skb, ((u8 *)get_l2_fhdr(data) - data) + BNX2_RX_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054) if (hdr_len == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055) skb_put(skb, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056) return skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058) unsigned int i, frag_len, frag_size, pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059) struct bnx2_sw_pg *rx_pg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060) u16 pg_cons = rxr->rx_pg_cons;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061) u16 pg_prod = rxr->rx_pg_prod;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063) frag_size = len + 4 - hdr_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064) pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065) skb_put(skb, hdr_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067) for (i = 0; i < pages; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068) dma_addr_t mapping_old;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070) frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071) if (unlikely(frag_len <= 4)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072) unsigned int tail = 4 - frag_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074) rxr->rx_pg_cons = pg_cons;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075) rxr->rx_pg_prod = pg_prod;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076) bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077) pages - i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078) skb->len -= tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079) if (i == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080) skb->tail -= tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082) skb_frag_t *frag =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083) &skb_shinfo(skb)->frags[i - 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084) skb_frag_size_sub(frag, tail);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085) skb->data_len -= tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087) return skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089) rx_pg = &rxr->rx_pg_ring[pg_cons];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091) /* Don't unmap yet. If we're unable to allocate a new
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092) * page, we need to recycle the page and the DMA addr.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094) mapping_old = dma_unmap_addr(rx_pg, mapping);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095) if (i == pages - 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096) frag_len -= 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098) skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099) rx_pg->page = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101) err = bnx2_alloc_rx_page(bp, rxr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102) BNX2_RX_PG_RING_IDX(pg_prod),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103) GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104) if (unlikely(err)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105) rxr->rx_pg_cons = pg_cons;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106) rxr->rx_pg_prod = pg_prod;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107) bnx2_reuse_rx_skb_pages(bp, rxr, skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108) pages - i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112) dma_unmap_page(&bp->pdev->dev, mapping_old,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113) PAGE_SIZE, PCI_DMA_FROMDEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115) frag_size -= frag_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116) skb->data_len += frag_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117) skb->truesize += PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118) skb->len += frag_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120) pg_prod = BNX2_NEXT_RX_BD(pg_prod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121) pg_cons = BNX2_RX_PG_RING_IDX(BNX2_NEXT_RX_BD(pg_cons));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123) rxr->rx_pg_prod = pg_prod;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124) rxr->rx_pg_cons = pg_cons;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126) return skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3127) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129) static inline u16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130) bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132) u16 cons;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3134) cons = READ_ONCE(*bnapi->hw_rx_cons_ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136) if (unlikely((cons & BNX2_MAX_RX_DESC_CNT) == BNX2_MAX_RX_DESC_CNT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137) cons++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138) return cons;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142) bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144) struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145) u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146) struct l2_fhdr *rx_hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147) int rx_pkt = 0, pg_ring_used = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149) if (budget <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150) return rx_pkt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152) hw_cons = bnx2_get_hw_rx_cons(bnapi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153) sw_cons = rxr->rx_cons;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154) sw_prod = rxr->rx_prod;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156) /* Memory barrier necessary as speculative reads of the rx
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157) * buffer can be ahead of the index in the status block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159) rmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160) while (sw_cons != hw_cons) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161) unsigned int len, hdr_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162) u32 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163) struct bnx2_sw_bd *rx_buf, *next_rx_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165) dma_addr_t dma_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166) u8 *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3167) u16 next_ring_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169) sw_ring_cons = BNX2_RX_RING_IDX(sw_cons);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170) sw_ring_prod = BNX2_RX_RING_IDX(sw_prod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172) rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173) data = rx_buf->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174) rx_buf->data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176) rx_hdr = get_l2_fhdr(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177) prefetch(rx_hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179) dma_addr = dma_unmap_addr(rx_buf, mapping);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3180)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181) dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182) BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3183) PCI_DMA_FROMDEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185) next_ring_idx = BNX2_RX_RING_IDX(BNX2_NEXT_RX_BD(sw_cons));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186) next_rx_buf = &rxr->rx_buf_ring[next_ring_idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3187) prefetch(get_l2_fhdr(next_rx_buf->data));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3189) len = rx_hdr->l2_fhdr_pkt_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3190) status = rx_hdr->l2_fhdr_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3192) hdr_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3193) if (status & L2_FHDR_STATUS_SPLIT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3194) hdr_len = rx_hdr->l2_fhdr_ip_xsum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3195) pg_ring_used = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3196) } else if (len > bp->rx_jumbo_thresh) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3197) hdr_len = bp->rx_jumbo_thresh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3198) pg_ring_used = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3199) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3201) if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3202) L2_FHDR_ERRORS_PHY_DECODE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3203) L2_FHDR_ERRORS_ALIGNMENT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3204) L2_FHDR_ERRORS_TOO_SHORT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3205) L2_FHDR_ERRORS_GIANT_FRAME))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3207) bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3208) sw_ring_prod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3209) if (pg_ring_used) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3210) int pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3212) pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3213)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3214) bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3215) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3216) goto next_rx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3217) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3218)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3219) len -= 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3220)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3221) if (len <= bp->rx_copy_thresh) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3222) skb = netdev_alloc_skb(bp->dev, len + 6);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3223) if (!skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3224) bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3225) sw_ring_prod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3226) goto next_rx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3227) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3228)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3229) /* aligned copy */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230) memcpy(skb->data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3231) (u8 *)rx_hdr + BNX2_RX_OFFSET - 6,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3232) len + 6);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3233) skb_reserve(skb, 6);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3234) skb_put(skb, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3235)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3236) bnx2_reuse_rx_data(bp, rxr, data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3237) sw_ring_cons, sw_ring_prod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3239) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3240) skb = bnx2_rx_skb(bp, rxr, data, len, hdr_len, dma_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3241) (sw_ring_cons << 16) | sw_ring_prod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3242) if (!skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3243) goto next_rx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3244) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3245) if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3246) !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3247) __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rx_hdr->l2_fhdr_vlan_tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3249) skb->protocol = eth_type_trans(skb, bp->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3251) if (len > (bp->dev->mtu + ETH_HLEN) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3252) skb->protocol != htons(0x8100) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3253) skb->protocol != htons(ETH_P_8021AD)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3255) dev_kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3256) goto next_rx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3258) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3259)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3260) skb_checksum_none_assert(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3261) if ((bp->dev->features & NETIF_F_RXCSUM) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3262) (status & (L2_FHDR_STATUS_TCP_SEGMENT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3263) L2_FHDR_STATUS_UDP_DATAGRAM))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3264)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3265) if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3266) L2_FHDR_ERRORS_UDP_XSUM)) == 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3267) skb->ip_summed = CHECKSUM_UNNECESSARY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3268) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3269) if ((bp->dev->features & NETIF_F_RXHASH) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3270) ((status & L2_FHDR_STATUS_USE_RXHASH) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3271) L2_FHDR_STATUS_USE_RXHASH))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3272) skb_set_hash(skb, rx_hdr->l2_fhdr_hash,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3273) PKT_HASH_TYPE_L3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3275) skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3276) napi_gro_receive(&bnapi->napi, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3277) rx_pkt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3278)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3279) next_rx:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3280) sw_cons = BNX2_NEXT_RX_BD(sw_cons);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3281) sw_prod = BNX2_NEXT_RX_BD(sw_prod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3282)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3283) if (rx_pkt == budget)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3284) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3285)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3286) /* Refresh hw_cons to see if there is new work */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3287) if (sw_cons == hw_cons) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3288) hw_cons = bnx2_get_hw_rx_cons(bnapi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3289) rmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3290) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3291) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3292) rxr->rx_cons = sw_cons;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3293) rxr->rx_prod = sw_prod;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3294)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3295) if (pg_ring_used)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3296) BNX2_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3297)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3298) BNX2_WR16(bp, rxr->rx_bidx_addr, sw_prod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3299)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3300) BNX2_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3302) return rx_pkt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3304) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3305)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3306) /* MSI ISR - The only difference between this and the INTx ISR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3307) * is that the MSI interrupt is always serviced.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3308) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3309) static irqreturn_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3310) bnx2_msi(int irq, void *dev_instance)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3311) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3312) struct bnx2_napi *bnapi = dev_instance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3313) struct bnx2 *bp = bnapi->bp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3314)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3315) prefetch(bnapi->status_blk.msi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3316) BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3317) BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3318) BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3319)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3320) /* Return here if interrupt is disabled. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3321) if (unlikely(atomic_read(&bp->intr_sem) != 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3322) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3323)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3324) napi_schedule(&bnapi->napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3325)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3326) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3327) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3329) static irqreturn_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3330) bnx2_msi_1shot(int irq, void *dev_instance)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3331) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3332) struct bnx2_napi *bnapi = dev_instance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3333) struct bnx2 *bp = bnapi->bp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3335) prefetch(bnapi->status_blk.msi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3336)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3337) /* Return here if interrupt is disabled. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3338) if (unlikely(atomic_read(&bp->intr_sem) != 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3339) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3340)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3341) napi_schedule(&bnapi->napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3342)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3343) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3344) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3345)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3346) static irqreturn_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3347) bnx2_interrupt(int irq, void *dev_instance)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3348) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3349) struct bnx2_napi *bnapi = dev_instance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3350) struct bnx2 *bp = bnapi->bp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3351) struct status_block *sblk = bnapi->status_blk.msi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3352)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3353) /* When using INTx, it is possible for the interrupt to arrive
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3354) * at the CPU before the status block posted prior to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3355) * interrupt. Reading a register will flush the status block.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3356) * When using MSI, the MSI message will always complete after
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3357) * the status block write.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3358) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3359) if ((sblk->status_idx == bnapi->last_status_idx) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3360) (BNX2_RD(bp, BNX2_PCICFG_MISC_STATUS) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3361) BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3362) return IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3363)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3364) BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3365) BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3366) BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3367)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3368) /* Read back to deassert IRQ immediately to avoid too many
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3369) * spurious interrupts.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3370) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3371) BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3372)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3373) /* Return here if interrupt is shared and is disabled. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3374) if (unlikely(atomic_read(&bp->intr_sem) != 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3375) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3377) if (napi_schedule_prep(&bnapi->napi)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3378) bnapi->last_status_idx = sblk->status_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3379) __napi_schedule(&bnapi->napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3380) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3381)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3382) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3383) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3384)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3385) static inline int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3386) bnx2_has_fast_work(struct bnx2_napi *bnapi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3387) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3388) struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3389) struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3390)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3391) if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3392) (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3393) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3394) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3395) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3396)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3397) #define STATUS_ATTN_EVENTS (STATUS_ATTN_BITS_LINK_STATE | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3398) STATUS_ATTN_BITS_TIMER_ABORT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3399)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3400) static inline int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3401) bnx2_has_work(struct bnx2_napi *bnapi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3402) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3403) struct status_block *sblk = bnapi->status_blk.msi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3404)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3405) if (bnx2_has_fast_work(bnapi))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3406) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3407)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3408) #ifdef BCM_CNIC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3409) if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3410) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3411) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3412)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3413) if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3414) (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3415) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3416)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3417) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3418) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3419)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3420) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3421) bnx2_chk_missed_msi(struct bnx2 *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3422) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3423) struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3424) u32 msi_ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3425)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3426) if (bnx2_has_work(bnapi)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3427) msi_ctrl = BNX2_RD(bp, BNX2_PCICFG_MSI_CONTROL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3428) if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3429) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3430)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3431) if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3432) BNX2_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3433) ~BNX2_PCICFG_MSI_CONTROL_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3434) BNX2_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3435) bnx2_msi(bp->irq_tbl[0].vector, bnapi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3436) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3437) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3438)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3439) bp->idle_chk_status_idx = bnapi->last_status_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3440) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3441)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3442) #ifdef BCM_CNIC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3443) static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3444) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3445) struct cnic_ops *c_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3446)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3447) if (!bnapi->cnic_present)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3448) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3449)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3450) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3451) c_ops = rcu_dereference(bp->cnic_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3452) if (c_ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3453) bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3454) bnapi->status_blk.msi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3455) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3456) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3457) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3458)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3459) static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3460) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3461) struct status_block *sblk = bnapi->status_blk.msi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3462) u32 status_attn_bits = sblk->status_attn_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3463) u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3464)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3465) if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3466) (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3467)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3468) bnx2_phy_int(bp, bnapi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3470) /* This is needed to take care of transient status
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3471) * during link changes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3472) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3473) BNX2_WR(bp, BNX2_HC_COMMAND,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3474) bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3475) BNX2_RD(bp, BNX2_HC_COMMAND);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3476) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3477) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3478)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3479) static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3480) int work_done, int budget)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3481) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3482) struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3483) struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3484)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3485) if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3486) bnx2_tx_int(bp, bnapi, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3487)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3488) if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3489) work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3490)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3491) return work_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3492) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3493)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3494) static int bnx2_poll_msix(struct napi_struct *napi, int budget)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3495) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3496) struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3497) struct bnx2 *bp = bnapi->bp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3498) int work_done = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3499) struct status_block_msix *sblk = bnapi->status_blk.msix;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3501) while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3502) work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3503) if (unlikely(work_done >= budget))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3504) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3505)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3506) bnapi->last_status_idx = sblk->status_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3507) /* status idx must be read before checking for more work. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3508) rmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3509) if (likely(!bnx2_has_fast_work(bnapi))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3510)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3511) napi_complete_done(napi, work_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3512) BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3513) BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3514) bnapi->last_status_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3515) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3516) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3517) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3518) return work_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3519) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3520)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3521) static int bnx2_poll(struct napi_struct *napi, int budget)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3522) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3523) struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3524) struct bnx2 *bp = bnapi->bp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3525) int work_done = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3526) struct status_block *sblk = bnapi->status_blk.msi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3527)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3528) while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3529) bnx2_poll_link(bp, bnapi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3530)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3531) work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3532)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3533) #ifdef BCM_CNIC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3534) bnx2_poll_cnic(bp, bnapi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3535) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3536)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3537) /* bnapi->last_status_idx is used below to tell the hw how
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3538) * much work has been processed, so we must read it before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3539) * checking for more work.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3540) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3541) bnapi->last_status_idx = sblk->status_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3542)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3543) if (unlikely(work_done >= budget))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3544) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3545)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3546) rmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3547) if (likely(!bnx2_has_work(bnapi))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3548) napi_complete_done(napi, work_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3549) if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3550) BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3551) BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3552) bnapi->last_status_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3553) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3554) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3555) BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3556) BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3557) BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3558) bnapi->last_status_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3559)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3560) BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3561) BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3562) bnapi->last_status_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3563) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3564) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3565) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3566)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3567) return work_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3568) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3569)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3570) /* Called with rtnl_lock from vlan functions and also netif_tx_lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3571) * from set_multicast.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3572) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3573) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3574) bnx2_set_rx_mode(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3575) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3576) struct bnx2 *bp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3577) u32 rx_mode, sort_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3578) struct netdev_hw_addr *ha;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3579) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3580)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3581) if (!netif_running(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3582) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3583)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3584) spin_lock_bh(&bp->phy_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3585)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3586) rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3587) BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3588) sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3589) if (!(dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3590) (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3591) rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3592) if (dev->flags & IFF_PROMISC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3593) /* Promiscuous mode. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3594) rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3595) sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3596) BNX2_RPM_SORT_USER0_PROM_VLAN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3597) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3598) else if (dev->flags & IFF_ALLMULTI) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3599) for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3600) BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3601) 0xffffffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3602) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3603) sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3604) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3605) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3606) /* Accept one or more multicast(s). */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3607) u32 mc_filter[NUM_MC_HASH_REGISTERS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3608) u32 regidx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3609) u32 bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3610) u32 crc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3611)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3612) memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3613)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3614) netdev_for_each_mc_addr(ha, dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3615) crc = ether_crc_le(ETH_ALEN, ha->addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3616) bit = crc & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3617) regidx = (bit & 0xe0) >> 5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3618) bit &= 0x1f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3619) mc_filter[regidx] |= (1 << bit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3620) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3621)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3622) for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3623) BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3624) mc_filter[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3625) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3626)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3627) sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3628) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3629)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3630) if (netdev_uc_count(dev) > BNX2_MAX_UNICAST_ADDRESSES) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3631) rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3632) sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3633) BNX2_RPM_SORT_USER0_PROM_VLAN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3634) } else if (!(dev->flags & IFF_PROMISC)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3635) /* Add all entries into to the match filter list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3636) i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3637) netdev_for_each_uc_addr(ha, dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3638) bnx2_set_mac_addr(bp, ha->addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3639) i + BNX2_START_UNICAST_ADDRESS_INDEX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3640) sort_mode |= (1 <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3641) (i + BNX2_START_UNICAST_ADDRESS_INDEX));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3642) i++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3643) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3644)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3645) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3646)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3647) if (rx_mode != bp->rx_mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3648) bp->rx_mode = rx_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3649) BNX2_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3650) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3651)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3652) BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3653) BNX2_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3654) BNX2_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3655)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3656) spin_unlock_bh(&bp->phy_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3657) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3658)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3659) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3660) check_fw_section(const struct firmware *fw,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3661) const struct bnx2_fw_file_section *section,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3662) u32 alignment, bool non_empty)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3663) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3664) u32 offset = be32_to_cpu(section->offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3665) u32 len = be32_to_cpu(section->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3666)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3667) if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3668) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3669) if ((non_empty && len == 0) || len > fw->size - offset ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3670) len & (alignment - 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3671) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3672) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3673) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3674)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3675) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3676) check_mips_fw_entry(const struct firmware *fw,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3677) const struct bnx2_mips_fw_file_entry *entry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3678) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3679) if (check_fw_section(fw, &entry->text, 4, true) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3680) check_fw_section(fw, &entry->data, 4, false) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3681) check_fw_section(fw, &entry->rodata, 4, false))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3682) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3683) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3684) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3685)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3686) static void bnx2_release_firmware(struct bnx2 *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3687) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3688) if (bp->rv2p_firmware) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3689) release_firmware(bp->mips_firmware);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3690) release_firmware(bp->rv2p_firmware);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3691) bp->rv2p_firmware = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3692) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3693) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3694)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3695) static int bnx2_request_uncached_firmware(struct bnx2 *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3696) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3697) const char *mips_fw_file, *rv2p_fw_file;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3698) const struct bnx2_mips_fw_file *mips_fw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3699) const struct bnx2_rv2p_fw_file *rv2p_fw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3700) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3701)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3702) if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3703) mips_fw_file = FW_MIPS_FILE_09;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3704) if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5709_A0) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3705) (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5709_A1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3706) rv2p_fw_file = FW_RV2P_FILE_09_Ax;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3707) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3708) rv2p_fw_file = FW_RV2P_FILE_09;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3709) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3710) mips_fw_file = FW_MIPS_FILE_06;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3711) rv2p_fw_file = FW_RV2P_FILE_06;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3712) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3713)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3714) rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3715) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3716) pr_err("Can't load firmware file \"%s\"\n", mips_fw_file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3717) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3718) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3719)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3720) rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3721) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3722) pr_err("Can't load firmware file \"%s\"\n", rv2p_fw_file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3723) goto err_release_mips_firmware;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3724) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3725) mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3726) rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3727) if (bp->mips_firmware->size < sizeof(*mips_fw) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3728) check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3729) check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3730) check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3731) check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3732) check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3733) pr_err("Firmware file \"%s\" is invalid\n", mips_fw_file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3734) rc = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3735) goto err_release_firmware;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3736) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3737) if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3738) check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3739) check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3740) pr_err("Firmware file \"%s\" is invalid\n", rv2p_fw_file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3741) rc = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3742) goto err_release_firmware;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3743) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3744) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3745) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3746)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3747) err_release_firmware:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3748) release_firmware(bp->rv2p_firmware);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3749) bp->rv2p_firmware = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3750) err_release_mips_firmware:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3751) release_firmware(bp->mips_firmware);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3752) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3753) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3754)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3755) static int bnx2_request_firmware(struct bnx2 *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3756) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3757) return bp->rv2p_firmware ? 0 : bnx2_request_uncached_firmware(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3758) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3759)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3760) static u32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3761) rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3762) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3763) switch (idx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3764) case RV2P_P1_FIXUP_PAGE_SIZE_IDX:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3765) rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3766) rv2p_code |= RV2P_BD_PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3767) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3768) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3769) return rv2p_code;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3770) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3771)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3772) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3773) load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3774) const struct bnx2_rv2p_fw_file_entry *fw_entry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3775) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3776) u32 rv2p_code_len, file_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3777) __be32 *rv2p_code;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3778) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3779) u32 val, cmd, addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3780)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3781) rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3782) file_offset = be32_to_cpu(fw_entry->rv2p.offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3783)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3784) rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3786) if (rv2p_proc == RV2P_PROC1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3787) cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3788) addr = BNX2_RV2P_PROC1_ADDR_CMD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3789) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3790) cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3791) addr = BNX2_RV2P_PROC2_ADDR_CMD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3792) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3794) for (i = 0; i < rv2p_code_len; i += 8) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3795) BNX2_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3796) rv2p_code++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3797) BNX2_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3798) rv2p_code++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3800) val = (i / 8) | cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3801) BNX2_WR(bp, addr, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3802) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3803)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3804) rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3805) for (i = 0; i < 8; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3806) u32 loc, code;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3807)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3808) loc = be32_to_cpu(fw_entry->fixup[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3809) if (loc && ((loc * 4) < rv2p_code_len)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3810) code = be32_to_cpu(*(rv2p_code + loc - 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3811) BNX2_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3812) code = be32_to_cpu(*(rv2p_code + loc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3813) code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3814) BNX2_WR(bp, BNX2_RV2P_INSTR_LOW, code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3815)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3816) val = (loc / 2) | cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3817) BNX2_WR(bp, addr, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3818) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3819) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3821) /* Reset the processor, un-stall is done later. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3822) if (rv2p_proc == RV2P_PROC1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3823) BNX2_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3824) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3825) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3826) BNX2_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3827) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3829) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3830) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3832) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3833) load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3834) const struct bnx2_mips_fw_file_entry *fw_entry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3835) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3836) u32 addr, len, file_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3837) __be32 *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3838) u32 offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3839) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3841) /* Halt the CPU. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3842) val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3843) val |= cpu_reg->mode_value_halt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3844) bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3845) bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3847) /* Load the Text area. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3848) addr = be32_to_cpu(fw_entry->text.addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3849) len = be32_to_cpu(fw_entry->text.len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3850) file_offset = be32_to_cpu(fw_entry->text.offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3851) data = (__be32 *)(bp->mips_firmware->data + file_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3852)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3853) offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3854) if (len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3855) int j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3856)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3857) for (j = 0; j < (len / 4); j++, offset += 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3858) bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3859) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3861) /* Load the Data area. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3862) addr = be32_to_cpu(fw_entry->data.addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3863) len = be32_to_cpu(fw_entry->data.len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3864) file_offset = be32_to_cpu(fw_entry->data.offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3865) data = (__be32 *)(bp->mips_firmware->data + file_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3867) offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3868) if (len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3869) int j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3871) for (j = 0; j < (len / 4); j++, offset += 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3872) bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3873) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3875) /* Load the Read-Only area. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3876) addr = be32_to_cpu(fw_entry->rodata.addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3877) len = be32_to_cpu(fw_entry->rodata.len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3878) file_offset = be32_to_cpu(fw_entry->rodata.offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3879) data = (__be32 *)(bp->mips_firmware->data + file_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3881) offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3882) if (len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3883) int j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3885) for (j = 0; j < (len / 4); j++, offset += 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3886) bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3887) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3888)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3889) /* Clear the pre-fetch instruction. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3890) bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3892) val = be32_to_cpu(fw_entry->start_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3893) bnx2_reg_wr_ind(bp, cpu_reg->pc, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3895) /* Start the CPU. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3896) val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3897) val &= ~cpu_reg->mode_value_halt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3898) bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3899) bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3901) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3902) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3903)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3904) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3905) bnx2_init_cpus(struct bnx2 *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3906) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3907) const struct bnx2_mips_fw_file *mips_fw =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3908) (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3909) const struct bnx2_rv2p_fw_file *rv2p_fw =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3910) (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3911) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3913) /* Initialize the RV2P processor. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3914) load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3915) load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3917) /* Initialize the RX Processor. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3918) rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3919) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3920) goto init_cpu_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3922) /* Initialize the TX Processor. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3923) rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3924) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3925) goto init_cpu_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3926)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3927) /* Initialize the TX Patch-up Processor. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3928) rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3929) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3930) goto init_cpu_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3932) /* Initialize the Completion Processor. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3933) rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3934) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3935) goto init_cpu_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3936)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3937) /* Initialize the Command Processor. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3938) rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3939)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3940) init_cpu_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3941) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3942) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3944) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3945) bnx2_setup_wol(struct bnx2 *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3946) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3947) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3948) u32 val, wol_msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3950) if (bp->wol) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3951) u32 advertising;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3952) u8 autoneg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3954) autoneg = bp->autoneg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3955) advertising = bp->advertising;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3956)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3957) if (bp->phy_port == PORT_TP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3958) bp->autoneg = AUTONEG_SPEED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3959) bp->advertising = ADVERTISED_10baseT_Half |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3960) ADVERTISED_10baseT_Full |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3961) ADVERTISED_100baseT_Half |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3962) ADVERTISED_100baseT_Full |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3963) ADVERTISED_Autoneg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3964) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3966) spin_lock_bh(&bp->phy_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3967) bnx2_setup_phy(bp, bp->phy_port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3968) spin_unlock_bh(&bp->phy_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3969)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3970) bp->autoneg = autoneg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3971) bp->advertising = advertising;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3973) bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3974)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3975) val = BNX2_RD(bp, BNX2_EMAC_MODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3977) /* Enable port mode. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3978) val &= ~BNX2_EMAC_MODE_PORT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3979) val |= BNX2_EMAC_MODE_MPKT_RCVD |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3980) BNX2_EMAC_MODE_ACPI_RCVD |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3981) BNX2_EMAC_MODE_MPKT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3982) if (bp->phy_port == PORT_TP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3983) val |= BNX2_EMAC_MODE_PORT_MII;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3984) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3985) val |= BNX2_EMAC_MODE_PORT_GMII;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3986) if (bp->line_speed == SPEED_2500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3987) val |= BNX2_EMAC_MODE_25G_MODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3988) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3990) BNX2_WR(bp, BNX2_EMAC_MODE, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3992) /* receive all multicast */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3993) for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3994) BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3995) 0xffffffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3996) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3997) BNX2_WR(bp, BNX2_EMAC_RX_MODE, BNX2_EMAC_RX_MODE_SORT_MODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3999) val = 1 | BNX2_RPM_SORT_USER0_BC_EN | BNX2_RPM_SORT_USER0_MC_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4000) BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4001) BNX2_WR(bp, BNX2_RPM_SORT_USER0, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4002) BNX2_WR(bp, BNX2_RPM_SORT_USER0, val | BNX2_RPM_SORT_USER0_ENA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4004) /* Need to enable EMAC and RPM for WOL. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4005) BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4006) BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4007) BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4008) BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4009)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4010) val = BNX2_RD(bp, BNX2_RPM_CONFIG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4011) val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4012) BNX2_WR(bp, BNX2_RPM_CONFIG, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4013)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4014) wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4015) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4016) wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4017) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4018)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4019) if (!(bp->flags & BNX2_FLAG_NO_WOL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4020) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4022) wol_msg |= BNX2_DRV_MSG_DATA_WAIT3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4023) if (bp->fw_last_msg || BNX2_CHIP(bp) != BNX2_CHIP_5709) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4024) bnx2_fw_sync(bp, wol_msg, 1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4025) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4026) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4027) /* Tell firmware not to power down the PHY yet, otherwise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4028) * the chip will take a long time to respond to MMIO reads.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4029) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4030) val = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4031) bnx2_shmem_wr(bp, BNX2_PORT_FEATURE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4032) val | BNX2_PORT_FEATURE_ASF_ENABLED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4033) bnx2_fw_sync(bp, wol_msg, 1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4034) bnx2_shmem_wr(bp, BNX2_PORT_FEATURE, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4035) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4036)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4037) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4038)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4039) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4040) bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4041) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4042) switch (state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4043) case PCI_D0: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4044) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4045)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4046) pci_enable_wake(bp->pdev, PCI_D0, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4047) pci_set_power_state(bp->pdev, PCI_D0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4048)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4049) val = BNX2_RD(bp, BNX2_EMAC_MODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4050) val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4051) val &= ~BNX2_EMAC_MODE_MPKT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4052) BNX2_WR(bp, BNX2_EMAC_MODE, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4053)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4054) val = BNX2_RD(bp, BNX2_RPM_CONFIG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4055) val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4056) BNX2_WR(bp, BNX2_RPM_CONFIG, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4057) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4058) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4059) case PCI_D3hot: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4060) bnx2_setup_wol(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4061) pci_wake_from_d3(bp->pdev, bp->wol);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4062) if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4063) (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4065) if (bp->wol)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4066) pci_set_power_state(bp->pdev, PCI_D3hot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4067) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4068)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4069) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4070) if (!bp->fw_last_msg && BNX2_CHIP(bp) == BNX2_CHIP_5709) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4071) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4072)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4073) /* Tell firmware not to power down the PHY yet,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4074) * otherwise the other port may not respond to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4075) * MMIO reads.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4076) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4077) val = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4078) val &= ~BNX2_CONDITION_PM_STATE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4079) val |= BNX2_CONDITION_PM_STATE_UNPREP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4080) bnx2_shmem_wr(bp, BNX2_BC_STATE_CONDITION, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4081) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4082) pci_set_power_state(bp->pdev, PCI_D3hot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4083)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4084) /* No more memory access after this point until
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4085) * device is brought back to D0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4086) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4087) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4088) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4089) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4090) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4091) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4092) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4093) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4094)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4095) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4096) bnx2_acquire_nvram_lock(struct bnx2 *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4097) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4098) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4099) int j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4101) /* Request access to the flash interface. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4102) BNX2_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4103) for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4104) val = BNX2_RD(bp, BNX2_NVM_SW_ARB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4105) if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4106) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4108) udelay(5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4109) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4111) if (j >= NVRAM_TIMEOUT_COUNT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4112) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4114) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4115) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4117) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4118) bnx2_release_nvram_lock(struct bnx2 *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4119) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4120) int j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4121) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4123) /* Relinquish nvram interface. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4124) BNX2_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4126) for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4127) val = BNX2_RD(bp, BNX2_NVM_SW_ARB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4128) if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4129) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4131) udelay(5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4132) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4134) if (j >= NVRAM_TIMEOUT_COUNT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4135) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4137) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4138) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4141) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4142) bnx2_enable_nvram_write(struct bnx2 *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4143) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4144) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4146) val = BNX2_RD(bp, BNX2_MISC_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4147) BNX2_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4149) if (bp->flash_info->flags & BNX2_NV_WREN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4150) int j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4152) BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4153) BNX2_WR(bp, BNX2_NVM_COMMAND,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4154) BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4156) for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4157) udelay(5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4159) val = BNX2_RD(bp, BNX2_NVM_COMMAND);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4160) if (val & BNX2_NVM_COMMAND_DONE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4161) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4162) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4164) if (j >= NVRAM_TIMEOUT_COUNT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4165) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4166) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4167) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4168) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4170) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4171) bnx2_disable_nvram_write(struct bnx2 *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4172) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4173) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4175) val = BNX2_RD(bp, BNX2_MISC_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4176) BNX2_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4177) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4180) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4181) bnx2_enable_nvram_access(struct bnx2 *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4182) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4183) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4185) val = BNX2_RD(bp, BNX2_NVM_ACCESS_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4186) /* Enable both bits, even on read. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4187) BNX2_WR(bp, BNX2_NVM_ACCESS_ENABLE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4188) val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4189) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4190)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4191) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4192) bnx2_disable_nvram_access(struct bnx2 *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4193) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4194) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4196) val = BNX2_RD(bp, BNX2_NVM_ACCESS_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4197) /* Disable both bits, even after read. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4198) BNX2_WR(bp, BNX2_NVM_ACCESS_ENABLE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4199) val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4200) BNX2_NVM_ACCESS_ENABLE_WR_EN));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4201) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4203) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4204) bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4205) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4206) u32 cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4207) int j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4209) if (bp->flash_info->flags & BNX2_NV_BUFFERED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4210) /* Buffered flash, no erase needed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4211) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4213) /* Build an erase command */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4214) cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4215) BNX2_NVM_COMMAND_DOIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4217) /* Need to clear DONE bit separately. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4218) BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4220) /* Address of the NVRAM to read from. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4221) BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4222)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4223) /* Issue an erase command. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4224) BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4225)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4226) /* Wait for completion. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4227) for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4228) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4229)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4230) udelay(5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4232) val = BNX2_RD(bp, BNX2_NVM_COMMAND);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4233) if (val & BNX2_NVM_COMMAND_DONE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4234) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4235) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4237) if (j >= NVRAM_TIMEOUT_COUNT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4238) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4239)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4240) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4241) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4243) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4244) bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4245) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4246) u32 cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4247) int j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4249) /* Build the command word. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4250) cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4251)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4252) /* Calculate an offset of a buffered flash, not needed for 5709. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4253) if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4254) offset = ((offset / bp->flash_info->page_size) <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4255) bp->flash_info->page_bits) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4256) (offset % bp->flash_info->page_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4257) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4259) /* Need to clear DONE bit separately. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4260) BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4261)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4262) /* Address of the NVRAM to read from. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4263) BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4264)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4265) /* Issue a read command. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4266) BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4267)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4268) /* Wait for completion. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4269) for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4270) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4271)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4272) udelay(5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4274) val = BNX2_RD(bp, BNX2_NVM_COMMAND);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4275) if (val & BNX2_NVM_COMMAND_DONE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4276) __be32 v = cpu_to_be32(BNX2_RD(bp, BNX2_NVM_READ));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4277) memcpy(ret_val, &v, 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4278) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4279) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4280) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4281) if (j >= NVRAM_TIMEOUT_COUNT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4282) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4283)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4284) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4285) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4288) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4289) bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4290) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4291) u32 cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4292) __be32 val32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4293) int j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4294)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4295) /* Build the command word. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4296) cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4297)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4298) /* Calculate an offset of a buffered flash, not needed for 5709. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4299) if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4300) offset = ((offset / bp->flash_info->page_size) <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4301) bp->flash_info->page_bits) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4302) (offset % bp->flash_info->page_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4303) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4305) /* Need to clear DONE bit separately. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4306) BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4307)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4308) memcpy(&val32, val, 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4309)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4310) /* Write the data. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4311) BNX2_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4312)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4313) /* Address of the NVRAM to write to. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4314) BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4315)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4316) /* Issue the write command. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4317) BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4319) /* Wait for completion. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4320) for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4321) udelay(5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4322)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4323) if (BNX2_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4324) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4325) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4326) if (j >= NVRAM_TIMEOUT_COUNT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4327) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4329) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4330) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4331)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4332) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4333) bnx2_init_nvram(struct bnx2 *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4334) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4335) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4336) int j, entry_count, rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4337) const struct flash_spec *flash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4338)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4339) if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4340) bp->flash_info = &flash_5709;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4341) goto get_flash_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4342) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4343)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4344) /* Determine the selected interface. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4345) val = BNX2_RD(bp, BNX2_NVM_CFG1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4346)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4347) entry_count = ARRAY_SIZE(flash_table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4348)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4349) if (val & 0x40000000) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4350)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4351) /* Flash interface has been reconfigured */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4352) for (j = 0, flash = &flash_table[0]; j < entry_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4353) j++, flash++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4354) if ((val & FLASH_BACKUP_STRAP_MASK) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4355) (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4356) bp->flash_info = flash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4357) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4358) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4359) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4360) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4361) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4362) u32 mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4363) /* Not yet been reconfigured */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4365) if (val & (1 << 23))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4366) mask = FLASH_BACKUP_STRAP_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4367) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4368) mask = FLASH_STRAP_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4369)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4370) for (j = 0, flash = &flash_table[0]; j < entry_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4371) j++, flash++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4372)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4373) if ((val & mask) == (flash->strapping & mask)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4374) bp->flash_info = flash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4375)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4376) /* Request access to the flash interface. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4377) if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4378) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4379)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4380) /* Enable access to flash interface */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4381) bnx2_enable_nvram_access(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4382)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4383) /* Reconfigure the flash interface */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4384) BNX2_WR(bp, BNX2_NVM_CFG1, flash->config1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4385) BNX2_WR(bp, BNX2_NVM_CFG2, flash->config2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4386) BNX2_WR(bp, BNX2_NVM_CFG3, flash->config3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4387) BNX2_WR(bp, BNX2_NVM_WRITE1, flash->write1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4388)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4389) /* Disable access to flash interface */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4390) bnx2_disable_nvram_access(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4391) bnx2_release_nvram_lock(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4392)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4393) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4394) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4395) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4396) } /* if (val & 0x40000000) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4397)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4398) if (j == entry_count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4399) bp->flash_info = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4400) pr_alert("Unknown flash/EEPROM type\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4401) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4402) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4404) get_flash_size:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4405) val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4406) val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4407) if (val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4408) bp->flash_size = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4409) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4410) bp->flash_size = bp->flash_info->total_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4411)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4412) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4413) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4414)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4415) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4416) bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4417) int buf_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4418) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4419) int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4420) u32 cmd_flags, offset32, len32, extra;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4421)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4422) if (buf_size == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4423) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4424)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4425) /* Request access to the flash interface. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4426) if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4427) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4428)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4429) /* Enable access to flash interface */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4430) bnx2_enable_nvram_access(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4431)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4432) len32 = buf_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4433) offset32 = offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4434) extra = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4435)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4436) cmd_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4437)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4438) if (offset32 & 3) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4439) u8 buf[4];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4440) u32 pre_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4441)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4442) offset32 &= ~3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4443) pre_len = 4 - (offset & 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4444)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4445) if (pre_len >= len32) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4446) pre_len = len32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4447) cmd_flags = BNX2_NVM_COMMAND_FIRST |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4448) BNX2_NVM_COMMAND_LAST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4449) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4450) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4451) cmd_flags = BNX2_NVM_COMMAND_FIRST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4452) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4453)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4454) rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4455)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4456) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4457) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4458)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4459) memcpy(ret_buf, buf + (offset & 3), pre_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4460)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4461) offset32 += 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4462) ret_buf += pre_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4463) len32 -= pre_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4464) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4465) if (len32 & 3) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4466) extra = 4 - (len32 & 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4467) len32 = (len32 + 4) & ~3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4468) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4470) if (len32 == 4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4471) u8 buf[4];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4472)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4473) if (cmd_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4474) cmd_flags = BNX2_NVM_COMMAND_LAST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4475) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4476) cmd_flags = BNX2_NVM_COMMAND_FIRST |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4477) BNX2_NVM_COMMAND_LAST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4478)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4479) rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4480)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4481) memcpy(ret_buf, buf, 4 - extra);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4482) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4483) else if (len32 > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4484) u8 buf[4];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4485)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4486) /* Read the first word. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4487) if (cmd_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4488) cmd_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4489) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4490) cmd_flags = BNX2_NVM_COMMAND_FIRST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4491)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4492) rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4493)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4494) /* Advance to the next dword. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4495) offset32 += 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4496) ret_buf += 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4497) len32 -= 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4499) while (len32 > 4 && rc == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4500) rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4502) /* Advance to the next dword. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4503) offset32 += 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4504) ret_buf += 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4505) len32 -= 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4506) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4507)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4508) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4509) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4510)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4511) cmd_flags = BNX2_NVM_COMMAND_LAST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4512) rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4514) memcpy(ret_buf, buf, 4 - extra);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4515) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4516)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4517) /* Disable access to flash interface */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4518) bnx2_disable_nvram_access(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4519)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4520) bnx2_release_nvram_lock(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4521)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4522) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4523) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4524)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4525) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4526) bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4527) int buf_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4528) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4529) u32 written, offset32, len32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4530) u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4531) int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4532) int align_start, align_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4533)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4534) buf = data_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4535) offset32 = offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4536) len32 = buf_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4537) align_start = align_end = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4538)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4539) if ((align_start = (offset32 & 3))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4540) offset32 &= ~3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4541) len32 += align_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4542) if (len32 < 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4543) len32 = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4544) if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4545) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4546) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4547)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4548) if (len32 & 3) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4549) align_end = 4 - (len32 & 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4550) len32 += align_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4551) if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4552) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4553) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4554)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4555) if (align_start || align_end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4556) align_buf = kmalloc(len32, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4557) if (!align_buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4558) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4559) if (align_start) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4560) memcpy(align_buf, start, 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4561) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4562) if (align_end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4563) memcpy(align_buf + len32 - 4, end, 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4564) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4565) memcpy(align_buf + align_start, data_buf, buf_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4566) buf = align_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4567) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4568)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4569) if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4570) flash_buffer = kmalloc(264, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4571) if (!flash_buffer) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4572) rc = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4573) goto nvram_write_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4574) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4575) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4576)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4577) written = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4578) while ((written < len32) && (rc == 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4579) u32 page_start, page_end, data_start, data_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4580) u32 addr, cmd_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4581) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4582)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4583) /* Find the page_start addr */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4584) page_start = offset32 + written;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4585) page_start -= (page_start % bp->flash_info->page_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4586) /* Find the page_end addr */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4587) page_end = page_start + bp->flash_info->page_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4588) /* Find the data_start addr */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4589) data_start = (written == 0) ? offset32 : page_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4590) /* Find the data_end addr */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4591) data_end = (page_end > offset32 + len32) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4592) (offset32 + len32) : page_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4593)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4594) /* Request access to the flash interface. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4595) if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4596) goto nvram_write_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4597)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4598) /* Enable access to flash interface */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4599) bnx2_enable_nvram_access(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4600)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4601) cmd_flags = BNX2_NVM_COMMAND_FIRST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4602) if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4603) int j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4604)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4605) /* Read the whole page into the buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4606) * (non-buffer flash only) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4607) for (j = 0; j < bp->flash_info->page_size; j += 4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4608) if (j == (bp->flash_info->page_size - 4)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4609) cmd_flags |= BNX2_NVM_COMMAND_LAST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4610) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4611) rc = bnx2_nvram_read_dword(bp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4612) page_start + j,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4613) &flash_buffer[j],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4614) cmd_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4615)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4616) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4617) goto nvram_write_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4618)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4619) cmd_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4620) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4621) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4622)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4623) /* Enable writes to flash interface (unlock write-protect) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4624) if ((rc = bnx2_enable_nvram_write(bp)) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4625) goto nvram_write_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4626)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4627) /* Loop to write back the buffer data from page_start to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4628) * data_start */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4629) i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4630) if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4631) /* Erase the page */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4632) if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4633) goto nvram_write_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4634)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4635) /* Re-enable the write again for the actual write */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4636) bnx2_enable_nvram_write(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4637)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4638) for (addr = page_start; addr < data_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4639) addr += 4, i += 4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4640)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4641) rc = bnx2_nvram_write_dword(bp, addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4642) &flash_buffer[i], cmd_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4643)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4644) if (rc != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4645) goto nvram_write_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4646)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4647) cmd_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4648) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4649) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4650)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4651) /* Loop to write the new data from data_start to data_end */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4652) for (addr = data_start; addr < data_end; addr += 4, i += 4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4653) if ((addr == page_end - 4) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4654) ((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4655) (addr == data_end - 4))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4656)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4657) cmd_flags |= BNX2_NVM_COMMAND_LAST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4658) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4659) rc = bnx2_nvram_write_dword(bp, addr, buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4660) cmd_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4661)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4662) if (rc != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4663) goto nvram_write_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4664)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4665) cmd_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4666) buf += 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4667) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4668)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4669) /* Loop to write back the buffer data from data_end
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4670) * to page_end */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4671) if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4672) for (addr = data_end; addr < page_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4673) addr += 4, i += 4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4674)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4675) if (addr == page_end-4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4676) cmd_flags = BNX2_NVM_COMMAND_LAST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4677) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4678) rc = bnx2_nvram_write_dword(bp, addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4679) &flash_buffer[i], cmd_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4680)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4681) if (rc != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4682) goto nvram_write_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4683)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4684) cmd_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4685) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4686) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4687)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4688) /* Disable writes to flash interface (lock write-protect) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4689) bnx2_disable_nvram_write(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4690)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4691) /* Disable access to flash interface */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4692) bnx2_disable_nvram_access(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4693) bnx2_release_nvram_lock(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4694)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4695) /* Increment written */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4696) written += data_end - data_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4697) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4698)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4699) nvram_write_end:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4700) kfree(flash_buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4701) kfree(align_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4702) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4703) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4704)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4705) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4706) bnx2_init_fw_cap(struct bnx2 *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4707) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4708) u32 val, sig = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4709)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4710) bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4711) bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4712)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4713) if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4714) bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4715)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4716) val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4717) if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4718) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4719)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4720) if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4721) bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4722) sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4723) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4724)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4725) if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4726) (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4727) u32 link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4728)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4729) bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4730)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4731) link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4732) if (link & BNX2_LINK_STATUS_SERDES_LINK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4733) bp->phy_port = PORT_FIBRE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4734) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4735) bp->phy_port = PORT_TP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4736)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4737) sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4738) BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4739) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4740)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4741) if (netif_running(bp->dev) && sig)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4742) bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4743) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4744)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4745) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4746) bnx2_setup_msix_tbl(struct bnx2 *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4747) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4748) BNX2_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4749)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4750) BNX2_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4751) BNX2_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4752) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4753)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4754) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4755) bnx2_wait_dma_complete(struct bnx2 *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4756) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4757) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4758) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4759)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4760) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4761) * Wait for the current PCI transaction to complete before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4762) * issuing a reset.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4763) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4764) if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4765) (BNX2_CHIP(bp) == BNX2_CHIP_5708)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4766) BNX2_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4767) BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4768) BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4769) BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4770) BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4771) val = BNX2_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4772) udelay(5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4773) } else { /* 5709 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4774) val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4775) val &= ~BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4776) BNX2_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4777) val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4778)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4779) for (i = 0; i < 100; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4780) msleep(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4781) val = BNX2_RD(bp, BNX2_PCICFG_DEVICE_CONTROL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4782) if (!(val & BNX2_PCICFG_DEVICE_STATUS_NO_PEND))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4783) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4784) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4785) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4786)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4787) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4788) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4789)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4790)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4791) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4792) bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4793) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4794) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4795) int i, rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4796) u8 old_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4798) /* Wait for the current PCI transaction to complete before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4799) * issuing a reset. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4800) bnx2_wait_dma_complete(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4801)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4802) /* Wait for the firmware to tell us it is ok to issue a reset. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4803) bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4804)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4805) /* Deposit a driver reset signature so the firmware knows that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4806) * this is a soft reset. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4807) bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4808) BNX2_DRV_RESET_SIGNATURE_MAGIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4810) /* Do a dummy read to force the chip to complete all current transaction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4811) * before we issue a reset. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4812) val = BNX2_RD(bp, BNX2_MISC_ID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4814) if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4815) BNX2_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4816) BNX2_RD(bp, BNX2_MISC_COMMAND);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4817) udelay(5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4819) val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4820) BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4821)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4822) BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4824) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4825) val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4826) BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4827) BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4829) /* Chip reset. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4830) BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4832) /* Reading back any register after chip reset will hang the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4833) * bus on 5706 A0 and A1. The msleep below provides plenty
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4834) * of margin for write posting.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4835) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4836) if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4837) (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4838) msleep(20);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4840) /* Reset takes approximate 30 usec */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4841) for (i = 0; i < 10; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4842) val = BNX2_RD(bp, BNX2_PCICFG_MISC_CONFIG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4843) if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4844) BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4845) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4846) udelay(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4847) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4848)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4849) if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4850) BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4851) pr_err("Chip reset did not complete\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4852) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4853) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4854) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4856) /* Make sure byte swapping is properly configured. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4857) val = BNX2_RD(bp, BNX2_PCI_SWAP_DIAG0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4858) if (val != 0x01020304) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4859) pr_err("Chip not in correct endian mode\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4860) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4861) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4863) /* Wait for the firmware to finish its initialization. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4864) rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4865) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4866) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4868) spin_lock_bh(&bp->phy_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4869) old_port = bp->phy_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4870) bnx2_init_fw_cap(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4871) if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4872) old_port != bp->phy_port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4873) bnx2_set_default_remote_link(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4874) spin_unlock_bh(&bp->phy_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4876) if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4877) /* Adjust the voltage regular to two steps lower. The default
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4878) * of this register is 0x0000000e. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4879) BNX2_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4881) /* Remove bad rbuf memory from the free pool. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4882) rc = bnx2_alloc_bad_rbuf(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4883) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4885) if (bp->flags & BNX2_FLAG_USING_MSIX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4886) bnx2_setup_msix_tbl(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4887) /* Prevent MSIX table reads and write from timing out */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4888) BNX2_WR(bp, BNX2_MISC_ECO_HW_CTL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4889) BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4890) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4892) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4893) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4895) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4896) bnx2_init_chip(struct bnx2 *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4897) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4898) u32 val, mtu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4899) int rc, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4901) /* Make sure the interrupt is not active. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4902) BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4903)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4904) val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4905) BNX2_DMA_CONFIG_DATA_WORD_SWAP |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4906) #ifdef __BIG_ENDIAN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4907) BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4908) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4909) BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4910) DMA_READ_CHANS << 12 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4911) DMA_WRITE_CHANS << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4913) val |= (0x2 << 20) | (1 << 11);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4915) if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4916) val |= (1 << 23);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4918) if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4919) (BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A0) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4920) !(bp->flags & BNX2_FLAG_PCIX))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4921) val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4923) BNX2_WR(bp, BNX2_DMA_CONFIG, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4925) if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4926) val = BNX2_RD(bp, BNX2_TDMA_CONFIG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4927) val |= BNX2_TDMA_CONFIG_ONE_DMA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4928) BNX2_WR(bp, BNX2_TDMA_CONFIG, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4929) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4930)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4931) if (bp->flags & BNX2_FLAG_PCIX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4932) u16 val16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4933)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4934) pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4935) &val16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4936) pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4937) val16 & ~PCI_X_CMD_ERO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4938) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4939)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4940) BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4941) BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4942) BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4943) BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4945) /* Initialize context mapping and zero out the quick contexts. The
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4946) * context block must have already been enabled. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4947) if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4948) rc = bnx2_init_5709_context(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4949) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4950) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4951) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4952) bnx2_init_context(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4954) if ((rc = bnx2_init_cpus(bp)) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4955) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4956)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4957) bnx2_init_nvram(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4959) bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4961) val = BNX2_RD(bp, BNX2_MQ_CONFIG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4962) val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4963) val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4964) if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4965) val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4966) if (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4967) val |= BNX2_MQ_CONFIG_HALT_DIS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4968) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4969)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4970) BNX2_WR(bp, BNX2_MQ_CONFIG, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4972) val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4973) BNX2_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4974) BNX2_WR(bp, BNX2_MQ_KNL_WIND_END, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4975)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4976) val = (BNX2_PAGE_BITS - 8) << 24;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4977) BNX2_WR(bp, BNX2_RV2P_CONFIG, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4978)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4979) /* Configure page size. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4980) val = BNX2_RD(bp, BNX2_TBDR_CONFIG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4981) val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4982) val |= (BNX2_PAGE_BITS - 8) << 24 | 0x40;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4983) BNX2_WR(bp, BNX2_TBDR_CONFIG, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4985) val = bp->mac_addr[0] +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4986) (bp->mac_addr[1] << 8) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4987) (bp->mac_addr[2] << 16) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4988) bp->mac_addr[3] +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4989) (bp->mac_addr[4] << 8) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4990) (bp->mac_addr[5] << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4991) BNX2_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4993) /* Program the MTU. Also include 4 bytes for CRC32. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4994) mtu = bp->dev->mtu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4995) val = mtu + ETH_HLEN + ETH_FCS_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4996) if (val > (MAX_ETHERNET_PACKET_SIZE + ETH_HLEN + 4))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4997) val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4998) BNX2_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5000) if (mtu < ETH_DATA_LEN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5001) mtu = ETH_DATA_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5003) bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5004) bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5005) bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5006)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5007) memset(bp->bnx2_napi[0].status_blk.msi, 0, bp->status_stats_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5008) for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5009) bp->bnx2_napi[i].last_status_idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5010)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5011) bp->idle_chk_status_idx = 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5012)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5013) /* Set up how to generate a link change interrupt. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5014) BNX2_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5015)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5016) BNX2_WR(bp, BNX2_HC_STATUS_ADDR_L,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5017) (u64) bp->status_blk_mapping & 0xffffffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5018) BNX2_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5019)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5020) BNX2_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5021) (u64) bp->stats_blk_mapping & 0xffffffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5022) BNX2_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5023) (u64) bp->stats_blk_mapping >> 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5025) BNX2_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5026) (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5028) BNX2_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5029) (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5031) BNX2_WR(bp, BNX2_HC_COMP_PROD_TRIP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5032) (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5033)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5034) BNX2_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5036) BNX2_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5037)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5038) BNX2_WR(bp, BNX2_HC_COM_TICKS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5039) (bp->com_ticks_int << 16) | bp->com_ticks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5040)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5041) BNX2_WR(bp, BNX2_HC_CMD_TICKS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5042) (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5044) if (bp->flags & BNX2_FLAG_BROKEN_STATS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5045) BNX2_WR(bp, BNX2_HC_STATS_TICKS, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5046) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5047) BNX2_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5048) BNX2_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5049)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5050) if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5051) val = BNX2_HC_CONFIG_COLLECT_STATS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5052) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5053) val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5054) BNX2_HC_CONFIG_COLLECT_STATS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5055) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5056)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5057) if (bp->flags & BNX2_FLAG_USING_MSIX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5058) BNX2_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5059) BNX2_HC_MSIX_BIT_VECTOR_VAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5061) val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5062) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5063)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5064) if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5065) val |= BNX2_HC_CONFIG_ONE_SHOT | BNX2_HC_CONFIG_USE_INT_PARAM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5066)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5067) BNX2_WR(bp, BNX2_HC_CONFIG, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5068)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5069) if (bp->rx_ticks < 25)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5070) bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5071) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5072) bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5073)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5074) for (i = 1; i < bp->irq_nvecs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5075) u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5076) BNX2_HC_SB_CONFIG_1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5077)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5078) BNX2_WR(bp, base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5079) BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5080) BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5081) BNX2_HC_SB_CONFIG_1_ONE_SHOT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5082)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5083) BNX2_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5084) (bp->tx_quick_cons_trip_int << 16) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5085) bp->tx_quick_cons_trip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5086)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5087) BNX2_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5088) (bp->tx_ticks_int << 16) | bp->tx_ticks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5089)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5090) BNX2_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5091) (bp->rx_quick_cons_trip_int << 16) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5092) bp->rx_quick_cons_trip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5093)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5094) BNX2_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5095) (bp->rx_ticks_int << 16) | bp->rx_ticks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5096) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5097)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5098) /* Clear internal stats counters. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5099) BNX2_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5101) BNX2_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5103) /* Initialize the receive filter. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5104) bnx2_set_rx_mode(bp->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5106) if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5107) val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5108) val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5109) BNX2_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5110) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5111) rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5112) 1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5114) BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5115) BNX2_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5117) udelay(20);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5119) bp->hc_cmd = BNX2_RD(bp, BNX2_HC_COMMAND);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5121) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5122) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5124) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5125) bnx2_clear_ring_states(struct bnx2 *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5126) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5127) struct bnx2_napi *bnapi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5128) struct bnx2_tx_ring_info *txr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5129) struct bnx2_rx_ring_info *rxr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5130) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5132) for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5133) bnapi = &bp->bnx2_napi[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5134) txr = &bnapi->tx_ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5135) rxr = &bnapi->rx_ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5137) txr->tx_cons = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5138) txr->hw_tx_cons = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5139) rxr->rx_prod_bseq = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5140) rxr->rx_prod = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5141) rxr->rx_cons = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5142) rxr->rx_pg_prod = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5143) rxr->rx_pg_cons = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5144) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5145) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5146)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5147) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5148) bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5149) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5150) u32 val, offset0, offset1, offset2, offset3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5151) u32 cid_addr = GET_CID_ADDR(cid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5153) if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5154) offset0 = BNX2_L2CTX_TYPE_XI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5155) offset1 = BNX2_L2CTX_CMD_TYPE_XI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5156) offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5157) offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5158) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5159) offset0 = BNX2_L2CTX_TYPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5160) offset1 = BNX2_L2CTX_CMD_TYPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5161) offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5162) offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5163) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5164) val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5165) bnx2_ctx_wr(bp, cid_addr, offset0, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5167) val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5168) bnx2_ctx_wr(bp, cid_addr, offset1, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5170) val = (u64) txr->tx_desc_mapping >> 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5171) bnx2_ctx_wr(bp, cid_addr, offset2, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5173) val = (u64) txr->tx_desc_mapping & 0xffffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5174) bnx2_ctx_wr(bp, cid_addr, offset3, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5175) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5176)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5177) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5178) bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5179) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5180) struct bnx2_tx_bd *txbd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5181) u32 cid = TX_CID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5182) struct bnx2_napi *bnapi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5183) struct bnx2_tx_ring_info *txr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5185) bnapi = &bp->bnx2_napi[ring_num];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5186) txr = &bnapi->tx_ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5188) if (ring_num == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5189) cid = TX_CID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5190) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5191) cid = TX_TSS_CID + ring_num - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5193) bp->tx_wake_thresh = bp->tx_ring_size / 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5195) txbd = &txr->tx_desc_ring[BNX2_MAX_TX_DESC_CNT];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5197) txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5198) txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5199)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5200) txr->tx_prod = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5201) txr->tx_prod_bseq = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5203) txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5204) txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5205)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5206) bnx2_init_tx_context(bp, cid, txr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5207) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5209) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5210) bnx2_init_rxbd_rings(struct bnx2_rx_bd *rx_ring[], dma_addr_t dma[],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5211) u32 buf_size, int num_rings)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5212) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5213) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5214) struct bnx2_rx_bd *rxbd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5216) for (i = 0; i < num_rings; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5217) int j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5218)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5219) rxbd = &rx_ring[i][0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5220) for (j = 0; j < BNX2_MAX_RX_DESC_CNT; j++, rxbd++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5221) rxbd->rx_bd_len = buf_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5222) rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5223) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5224) if (i == (num_rings - 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5225) j = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5226) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5227) j = i + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5228) rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5229) rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5230) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5231) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5232)
/* Set up one RX ring (and its optional page ring for jumbo frames):
 * write the ring's chip context, pre-fill the ring with receive
 * buffers, and publish the initial producer indices to the hardware
 * mailboxes.  Register/context write order is significant.
 */
static void
bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
{
	int i;
	u16 prod, ring_prod;
	u32 cid, rx_cid_addr, val;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	/* Ring 0 uses the base RX CID; additional (RSS) rings get
	 * consecutive CIDs starting at RX_RSS_CID.
	 */
	if (ring_num == 0)
		cid = RX_CID;
	else
		cid = RX_RSS_CID + ring_num - 1;

	rx_cid_addr = GET_CID_ADDR(cid);

	/* Chain the RX BD pages into a circular ring and stamp every
	 * descriptor with the buffer size and START|END flags.
	 */
	bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
			     bp->rx_buf_use_size, bp->rx_max_ring);

	bnx2_init_rx_context(bp, cid);

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		/* 5709-specific MQ mapping setup. */
		val = BNX2_RD(bp, BNX2_MQ_MAP_L2_5);
		BNX2_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
	}

	/* Page buffering defaults to off; enabled below when a page
	 * ring is configured (jumbo MTU).
	 */
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
	if (bp->rx_pg_ring_size) {
		/* Jumbo path: a second BD ring of full pages holds the
		 * packet payload beyond the small header buffer.
		 */
		bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
				     rxr->rx_pg_desc_mapping,
				     PAGE_SIZE, bp->rx_max_pg_ring);
		val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
			   BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);

		/* 64-bit base address of the first page-BD ring page. */
		val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);

		val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);

		if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
			BNX2_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
	}

	/* 64-bit base address of the first normal RX BD ring page. */
	val = (u64) rxr->rx_desc_mapping[0] >> 32;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);

	/* Pre-fill the page ring; a partial fill is tolerated with a
	 * warning rather than treated as fatal.
	 */
	ring_prod = prod = rxr->rx_pg_prod;
	for (i = 0; i < bp->rx_pg_ring_size; i++) {
		if (bnx2_alloc_rx_page(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
			netdev_warn(bp->dev, "init'ed rx page ring %d with %d/%d pages only\n",
				    ring_num, i, bp->rx_pg_ring_size);
			break;
		}
		prod = BNX2_NEXT_RX_BD(prod);
		ring_prod = BNX2_RX_PG_RING_IDX(prod);
	}
	rxr->rx_pg_prod = prod;

	/* Pre-fill the normal RX ring with data buffers, same policy. */
	ring_prod = prod = rxr->rx_prod;
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_data(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
			netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
				    ring_num, i, bp->rx_ring_size);
			break;
		}
		prod = BNX2_NEXT_RX_BD(prod);
		ring_prod = BNX2_RX_RING_IDX(prod);
	}
	rxr->rx_prod = prod;

	/* Mailbox addresses used by the hot path to advance producers. */
	rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
	rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
	rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;

	/* Publish the initial producer indices to the hardware. */
	BNX2_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
	BNX2_WR16(bp, rxr->rx_bidx_addr, prod);

	BNX2_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5319) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5320) bnx2_init_all_rings(struct bnx2 *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5321) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5322) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5323) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5324)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5325) bnx2_clear_ring_states(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5327) BNX2_WR(bp, BNX2_TSCH_TSS_CFG, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5328) for (i = 0; i < bp->num_tx_rings; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5329) bnx2_init_tx_ring(bp, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5331) if (bp->num_tx_rings > 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5332) BNX2_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5333) (TX_TSS_CID << 7));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5335) BNX2_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5336) bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5337)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5338) for (i = 0; i < bp->num_rx_rings; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5339) bnx2_init_rx_ring(bp, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5340)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5341) if (bp->num_rx_rings > 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5342) u32 tbl_32 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5343)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5344) for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5345) int shift = (i % 8) << 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5346)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5347) tbl_32 |= (i % (bp->num_rx_rings - 1)) << shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5348) if ((i % 8) == 7) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5349) BNX2_WR(bp, BNX2_RLUP_RSS_DATA, tbl_32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5350) BNX2_WR(bp, BNX2_RLUP_RSS_COMMAND, (i >> 3) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5351) BNX2_RLUP_RSS_COMMAND_RSS_WRITE_MASK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5352) BNX2_RLUP_RSS_COMMAND_WRITE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5353) BNX2_RLUP_RSS_COMMAND_HASH_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5354) tbl_32 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5355) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5356) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5357)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5358) val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5359) BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5360)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5361) BNX2_WR(bp, BNX2_RLUP_RSS_CONFIG, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5362)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5363) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5364) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5365)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5366) static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5367) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5368) u32 max, num_rings = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5369)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5370) while (ring_size > BNX2_MAX_RX_DESC_CNT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5371) ring_size -= BNX2_MAX_RX_DESC_CNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5372) num_rings++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5373) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5374) /* round to next power of 2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5375) max = max_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5376) while ((max & num_rings) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5377) max >>= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5378)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5379) if (num_rings != max)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5380) max <<= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5381)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5382) return max;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5383) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5384)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5385) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5386) bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5387) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5388) u32 rx_size, rx_space, jumbo_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5389)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5390) /* 8 for CRC and VLAN */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5391) rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5392)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5393) rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5394) SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5395)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5396) bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5397) bp->rx_pg_ring_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5398) bp->rx_max_pg_ring = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5399) bp->rx_max_pg_ring_idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5400) if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5401) int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5402)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5403) jumbo_size = size * pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5404) if (jumbo_size > BNX2_MAX_TOTAL_RX_PG_DESC_CNT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5405) jumbo_size = BNX2_MAX_TOTAL_RX_PG_DESC_CNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5406)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5407) bp->rx_pg_ring_size = jumbo_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5408) bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5409) BNX2_MAX_RX_PG_RINGS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5410) bp->rx_max_pg_ring_idx =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5411) (bp->rx_max_pg_ring * BNX2_RX_DESC_CNT) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5412) rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5413) bp->rx_copy_thresh = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5414) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5415)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5416) bp->rx_buf_use_size = rx_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5417) /* hw alignment + build_skb() overhead*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5418) bp->rx_buf_size = SKB_DATA_ALIGN(bp->rx_buf_use_size + BNX2_RX_ALIGN) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5419) NET_SKB_PAD + SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5420) bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5421) bp->rx_ring_size = size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5422) bp->rx_max_ring = bnx2_find_max_ring(size, BNX2_MAX_RX_RINGS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5423) bp->rx_max_ring_idx = (bp->rx_max_ring * BNX2_RX_DESC_CNT) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5424) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5425)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5426) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5427) bnx2_free_tx_skbs(struct bnx2 *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5428) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5429) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5430)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5431) for (i = 0; i < bp->num_tx_rings; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5432) struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5433) struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5434) int j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5435)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5436) if (!txr->tx_buf_ring)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5437) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5438)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5439) for (j = 0; j < BNX2_TX_DESC_CNT; ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5440) struct bnx2_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5441) struct sk_buff *skb = tx_buf->skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5442) int k, last;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5443)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5444) if (!skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5445) j = BNX2_NEXT_TX_BD(j);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5446) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5447) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5448)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5449) dma_unmap_single(&bp->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5450) dma_unmap_addr(tx_buf, mapping),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5451) skb_headlen(skb),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5452) PCI_DMA_TODEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5453)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5454) tx_buf->skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5455)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5456) last = tx_buf->nr_frags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5457) j = BNX2_NEXT_TX_BD(j);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5458) for (k = 0; k < last; k++, j = BNX2_NEXT_TX_BD(j)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5459) tx_buf = &txr->tx_buf_ring[BNX2_TX_RING_IDX(j)];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5460) dma_unmap_page(&bp->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5461) dma_unmap_addr(tx_buf, mapping),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5462) skb_frag_size(&skb_shinfo(skb)->frags[k]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5463) PCI_DMA_TODEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5464) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5465) dev_kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5466) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5467) netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5468) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5469) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5470)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5471) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5472) bnx2_free_rx_skbs(struct bnx2 *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5473) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5474) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5475)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5476) for (i = 0; i < bp->num_rx_rings; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5477) struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5478) struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5479) int j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5480)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5481) if (!rxr->rx_buf_ring)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5482) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5484) for (j = 0; j < bp->rx_max_ring_idx; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5485) struct bnx2_sw_bd *rx_buf = &rxr->rx_buf_ring[j];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5486) u8 *data = rx_buf->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5487)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5488) if (!data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5489) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5490)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5491) dma_unmap_single(&bp->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5492) dma_unmap_addr(rx_buf, mapping),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5493) bp->rx_buf_use_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5494) PCI_DMA_FROMDEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5495)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5496) rx_buf->data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5497)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5498) kfree(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5499) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5500) for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5501) bnx2_free_rx_page(bp, rxr, j);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5502) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5503) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5504)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5505) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5506) bnx2_free_skbs(struct bnx2 *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5507) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5508) bnx2_free_tx_skbs(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5509) bnx2_free_rx_skbs(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5510) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5511)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5512) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5513) bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5514) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5515) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5516)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5517) rc = bnx2_reset_chip(bp, reset_code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5518) bnx2_free_skbs(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5519) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5520) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5521)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5522) if ((rc = bnx2_init_chip(bp)) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5523) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5524)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5525) bnx2_init_all_rings(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5526) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5527) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5528)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5529) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5530) bnx2_init_nic(struct bnx2 *bp, int reset_phy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5531) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5532) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5533)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5534) if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5535) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5536)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5537) spin_lock_bh(&bp->phy_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5538) bnx2_init_phy(bp, reset_phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5539) bnx2_set_link(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5540) if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5541) bnx2_remote_phy_event(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5542) spin_unlock_bh(&bp->phy_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5543) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5544) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5545)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5546) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5547) bnx2_shutdown_chip(struct bnx2 *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5548) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5549) u32 reset_code;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5550)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5551) if (bp->flags & BNX2_FLAG_NO_WOL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5552) reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5553) else if (bp->wol)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5554) reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5555) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5556) reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5557)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5558) return bnx2_reset_chip(bp, reset_code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5559) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5560)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5561) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5562) bnx2_test_registers(struct bnx2 *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5563) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5564) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5565) int i, is_5709;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5566) static const struct {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5567) u16 offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5568) u16 flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5569) #define BNX2_FL_NOT_5709 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5570) u32 rw_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5571) u32 ro_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5572) } reg_tbl[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5573) { 0x006c, 0, 0x00000000, 0x0000003f },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5574) { 0x0090, 0, 0xffffffff, 0x00000000 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5575) { 0x0094, 0, 0x00000000, 0x00000000 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5576)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5577) { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5578) { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5579) { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5580) { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5581) { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5582) { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5583) { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5584) { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5585) { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5586)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5587) { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5588) { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5589) { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5590) { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5591) { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5592) { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5593)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5594) { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5595) { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5596) { 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5597)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5598) { 0x1000, 0, 0x00000000, 0x00000001 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5599) { 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5600)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5601) { 0x1408, 0, 0x01c00800, 0x00000000 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5602) { 0x149c, 0, 0x8000ffff, 0x00000000 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5603) { 0x14a8, 0, 0x00000000, 0x000001ff },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5604) { 0x14ac, 0, 0x0fffffff, 0x10000000 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5605) { 0x14b0, 0, 0x00000002, 0x00000001 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5606) { 0x14b8, 0, 0x00000000, 0x00000000 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5607) { 0x14c0, 0, 0x00000000, 0x00000009 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5608) { 0x14c4, 0, 0x00003fff, 0x00000000 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5609) { 0x14cc, 0, 0x00000000, 0x00000001 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5610) { 0x14d0, 0, 0xffffffff, 0x00000000 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5611)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5612) { 0x1800, 0, 0x00000000, 0x00000001 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5613) { 0x1804, 0, 0x00000000, 0x00000003 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5614)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5615) { 0x2800, 0, 0x00000000, 0x00000001 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5616) { 0x2804, 0, 0x00000000, 0x00003f01 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5617) { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5618) { 0x2810, 0, 0xffff0000, 0x00000000 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5619) { 0x2814, 0, 0xffff0000, 0x00000000 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5620) { 0x2818, 0, 0xffff0000, 0x00000000 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5621) { 0x281c, 0, 0xffff0000, 0x00000000 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5622) { 0x2834, 0, 0xffffffff, 0x00000000 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5623) { 0x2840, 0, 0x00000000, 0xffffffff },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5624) { 0x2844, 0, 0x00000000, 0xffffffff },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5625) { 0x2848, 0, 0xffffffff, 0x00000000 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5626) { 0x284c, 0, 0xf800f800, 0x07ff07ff },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5627)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5628) { 0x2c00, 0, 0x00000000, 0x00000011 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5629) { 0x2c04, 0, 0x00000000, 0x00030007 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5630)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5631) { 0x3c00, 0, 0x00000000, 0x00000001 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5632) { 0x3c04, 0, 0x00000000, 0x00070000 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5633) { 0x3c08, 0, 0x00007f71, 0x07f00000 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5634) { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5635) { 0x3c10, 0, 0xffffffff, 0x00000000 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5636) { 0x3c14, 0, 0x00000000, 0xffffffff },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5637) { 0x3c18, 0, 0x00000000, 0xffffffff },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5638) { 0x3c1c, 0, 0xfffff000, 0x00000000 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5639) { 0x3c20, 0, 0xffffff00, 0x00000000 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5640)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5641) { 0x5004, 0, 0x00000000, 0x0000007f },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5642) { 0x5008, 0, 0x0f0007ff, 0x00000000 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5643)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5644) { 0x5c00, 0, 0x00000000, 0x00000001 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5645) { 0x5c04, 0, 0x00000000, 0x0003000f },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5646) { 0x5c08, 0, 0x00000003, 0x00000000 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5647) { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5648) { 0x5c10, 0, 0x00000000, 0xffffffff },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5649) { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5650) { 0x5c84, 0, 0x00000000, 0x0000f333 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5651) { 0x5c88, 0, 0x00000000, 0x00077373 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5652) { 0x5c8c, 0, 0x00000000, 0x0007f737 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5653)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5654) { 0x6808, 0, 0x0000ff7f, 0x00000000 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5655) { 0x680c, 0, 0xffffffff, 0x00000000 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5656) { 0x6810, 0, 0xffffffff, 0x00000000 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5657) { 0x6814, 0, 0xffffffff, 0x00000000 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5658) { 0x6818, 0, 0xffffffff, 0x00000000 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5659) { 0x681c, 0, 0xffffffff, 0x00000000 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5660) { 0x6820, 0, 0x00ff00ff, 0x00000000 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5661) { 0x6824, 0, 0x00ff00ff, 0x00000000 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5662) { 0x6828, 0, 0x00ff00ff, 0x00000000 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5663) { 0x682c, 0, 0x03ff03ff, 0x00000000 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5664) { 0x6830, 0, 0x03ff03ff, 0x00000000 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5665) { 0x6834, 0, 0x03ff03ff, 0x00000000 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5666) { 0x6838, 0, 0x03ff03ff, 0x00000000 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5667) { 0x683c, 0, 0x0000ffff, 0x00000000 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5668) { 0x6840, 0, 0x00000ff0, 0x00000000 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5669) { 0x6844, 0, 0x00ffff00, 0x00000000 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5670) { 0x684c, 0, 0xffffffff, 0x00000000 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5671) { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5672) { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5673) { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5674) { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5675) { 0x6908, 0, 0x00000000, 0x0001ff0f },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5676) { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5677)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5678) { 0xffff, 0, 0x00000000, 0x00000000 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5679) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5680)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5681) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5682) is_5709 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5683) if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5684) is_5709 = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5685)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5686) for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5687) u32 offset, rw_mask, ro_mask, save_val, val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5688) u16 flags = reg_tbl[i].flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5689)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5690) if (is_5709 && (flags & BNX2_FL_NOT_5709))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5691) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5692)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5693) offset = (u32) reg_tbl[i].offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5694) rw_mask = reg_tbl[i].rw_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5695) ro_mask = reg_tbl[i].ro_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5696)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5697) save_val = readl(bp->regview + offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5698)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5699) writel(0, bp->regview + offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5700)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5701) val = readl(bp->regview + offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5702) if ((val & rw_mask) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5703) goto reg_test_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5704) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5705)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5706) if ((val & ro_mask) != (save_val & ro_mask)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5707) goto reg_test_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5708) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5709)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5710) writel(0xffffffff, bp->regview + offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5711)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5712) val = readl(bp->regview + offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5713) if ((val & rw_mask) != rw_mask) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5714) goto reg_test_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5715) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5716)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5717) if ((val & ro_mask) != (save_val & ro_mask)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5718) goto reg_test_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5719) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5720)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5721) writel(save_val, bp->regview + offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5722) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5723)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5724) reg_test_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5725) writel(save_val, bp->regview + offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5726) ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5727) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5728) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5729) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5730) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5731)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5732) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5733) bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5734) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5735) static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5736) 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5737) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5738)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5739) for (i = 0; i < sizeof(test_pattern) / 4; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5740) u32 offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5741)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5742) for (offset = 0; offset < size; offset += 4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5743)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5744) bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5745)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5746) if (bnx2_reg_rd_ind(bp, start + offset) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5747) test_pattern[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5748) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5749) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5750) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5751) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5752) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5753) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5754)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5755) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5756) bnx2_test_memory(struct bnx2 *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5757) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5758) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5759) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5760) static struct mem_entry {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5761) u32 offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5762) u32 len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5763) } mem_tbl_5706[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5764) { 0x60000, 0x4000 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5765) { 0xa0000, 0x3000 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5766) { 0xe0000, 0x4000 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5767) { 0x120000, 0x4000 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5768) { 0x1a0000, 0x4000 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5769) { 0x160000, 0x4000 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5770) { 0xffffffff, 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5771) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5772) mem_tbl_5709[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5773) { 0x60000, 0x4000 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5774) { 0xa0000, 0x3000 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5775) { 0xe0000, 0x4000 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5776) { 0x120000, 0x4000 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5777) { 0x1a0000, 0x4000 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5778) { 0xffffffff, 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5779) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5780) struct mem_entry *mem_tbl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5781)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5782) if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5783) mem_tbl = mem_tbl_5709;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5784) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5785) mem_tbl = mem_tbl_5706;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5786)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5787) for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5788) if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5789) mem_tbl[i].len)) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5790) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5791) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5792) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5794) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5795) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5797) #define BNX2_MAC_LOOPBACK 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5798) #define BNX2_PHY_LOOPBACK 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5800) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5801) bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5802) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5803) unsigned int pkt_size, num_pkts, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5804) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5805) u8 *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5806) unsigned char *packet;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5807) u16 rx_start_idx, rx_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5808) dma_addr_t map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5809) struct bnx2_tx_bd *txbd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5810) struct bnx2_sw_bd *rx_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5811) struct l2_fhdr *rx_hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5812) int ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5813) struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5814) struct bnx2_tx_ring_info *txr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5815) struct bnx2_rx_ring_info *rxr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5816)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5817) tx_napi = bnapi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5819) txr = &tx_napi->tx_ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5820) rxr = &bnapi->rx_ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5821) if (loopback_mode == BNX2_MAC_LOOPBACK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5822) bp->loopback = MAC_LOOPBACK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5823) bnx2_set_mac_loopback(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5824) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5825) else if (loopback_mode == BNX2_PHY_LOOPBACK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5826) if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5827) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5829) bp->loopback = PHY_LOOPBACK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5830) bnx2_set_phy_loopback(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5831) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5832) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5833) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5835) pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5836) skb = netdev_alloc_skb(bp->dev, pkt_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5837) if (!skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5838) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5839) packet = skb_put(skb, pkt_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5840) memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5841) memset(packet + ETH_ALEN, 0x0, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5842) for (i = 14; i < pkt_size; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5843) packet[i] = (unsigned char) (i & 0xff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5844)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5845) map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5846) PCI_DMA_TODEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5847) if (dma_mapping_error(&bp->pdev->dev, map)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5848) dev_kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5849) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5850) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5852) BNX2_WR(bp, BNX2_HC_COMMAND,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5853) bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5855) BNX2_RD(bp, BNX2_HC_COMMAND);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5856)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5857) udelay(5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5858) rx_start_idx = bnx2_get_hw_rx_cons(bnapi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5860) num_pkts = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5862) txbd = &txr->tx_desc_ring[BNX2_TX_RING_IDX(txr->tx_prod)];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5864) txbd->tx_bd_haddr_hi = (u64) map >> 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5865) txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5866) txbd->tx_bd_mss_nbytes = pkt_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5867) txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5869) num_pkts++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5870) txr->tx_prod = BNX2_NEXT_TX_BD(txr->tx_prod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5871) txr->tx_prod_bseq += pkt_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5873) BNX2_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5874) BNX2_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5876) udelay(100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5878) BNX2_WR(bp, BNX2_HC_COMMAND,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5879) bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5881) BNX2_RD(bp, BNX2_HC_COMMAND);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5883) udelay(5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5885) dma_unmap_single(&bp->pdev->dev, map, pkt_size, PCI_DMA_TODEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5886) dev_kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5888) if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5889) goto loopback_test_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5891) rx_idx = bnx2_get_hw_rx_cons(bnapi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5892) if (rx_idx != rx_start_idx + num_pkts) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5893) goto loopback_test_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5894) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5896) rx_buf = &rxr->rx_buf_ring[rx_start_idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5897) data = rx_buf->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5899) rx_hdr = get_l2_fhdr(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5900) data = (u8 *)rx_hdr + BNX2_RX_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5901)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5902) dma_sync_single_for_cpu(&bp->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5903) dma_unmap_addr(rx_buf, mapping),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5904) bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5906) if (rx_hdr->l2_fhdr_status &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5907) (L2_FHDR_ERRORS_BAD_CRC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5908) L2_FHDR_ERRORS_PHY_DECODE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5909) L2_FHDR_ERRORS_ALIGNMENT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5910) L2_FHDR_ERRORS_TOO_SHORT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5911) L2_FHDR_ERRORS_GIANT_FRAME)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5913) goto loopback_test_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5914) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5916) if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5917) goto loopback_test_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5918) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5920) for (i = 14; i < pkt_size; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5921) if (*(data + i) != (unsigned char) (i & 0xff)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5922) goto loopback_test_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5923) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5924) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5926) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5928) loopback_test_done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5929) bp->loopback = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5930) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5931) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5933) #define BNX2_MAC_LOOPBACK_FAILED 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5934) #define BNX2_PHY_LOOPBACK_FAILED 2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5935) #define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5936) BNX2_PHY_LOOPBACK_FAILED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5938) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5939) bnx2_test_loopback(struct bnx2 *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5940) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5941) int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5943) if (!netif_running(bp->dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5944) return BNX2_LOOPBACK_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5945)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5946) bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5947) spin_lock_bh(&bp->phy_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5948) bnx2_init_phy(bp, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5949) spin_unlock_bh(&bp->phy_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5950) if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5951) rc |= BNX2_MAC_LOOPBACK_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5952) if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5953) rc |= BNX2_PHY_LOOPBACK_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5954) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5955) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5956)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5957) #define NVRAM_SIZE 0x200
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5958) #define CRC32_RESIDUAL 0xdebb20e3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5959)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5960) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5961) bnx2_test_nvram(struct bnx2 *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5962) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5963) __be32 buf[NVRAM_SIZE / 4];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5964) u8 *data = (u8 *) buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5965) int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5966) u32 magic, csum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5967)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5968) if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5969) goto test_nvram_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5971) magic = be32_to_cpu(buf[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5972) if (magic != 0x669955aa) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5973) rc = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5974) goto test_nvram_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5975) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5977) if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5978) goto test_nvram_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5980) csum = ether_crc_le(0x100, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5981) if (csum != CRC32_RESIDUAL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5982) rc = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5983) goto test_nvram_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5984) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5985)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5986) csum = ether_crc_le(0x100, data + 0x100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5987) if (csum != CRC32_RESIDUAL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5988) rc = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5989) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5990)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5991) test_nvram_done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5992) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5993) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5994)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5995) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5996) bnx2_test_link(struct bnx2 *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5997) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5998) u32 bmsr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6000) if (!netif_running(bp->dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6001) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6003) if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6004) if (bp->link_up)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6005) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6006) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6007) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6008) spin_lock_bh(&bp->phy_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6009) bnx2_enable_bmsr1(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6010) bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6011) bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6012) bnx2_disable_bmsr1(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6013) spin_unlock_bh(&bp->phy_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6015) if (bmsr & BMSR_LSTATUS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6016) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6017) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6018) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6019) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6021) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6022) bnx2_test_intr(struct bnx2 *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6023) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6024) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6025) u16 status_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6027) if (!netif_running(bp->dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6028) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6029)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6030) status_idx = BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6031)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6032) /* This register is not touched during run-time. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6033) BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6034) BNX2_RD(bp, BNX2_HC_COMMAND);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6036) for (i = 0; i < 10; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6037) if ((BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6038) status_idx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6040) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6041) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6042)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6043) msleep_interruptible(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6044) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6045) if (i < 10)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6046) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6047)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6048) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6049) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6050)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6051) /* Determining link for parallel detection. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6052) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6053) bnx2_5706_serdes_has_link(struct bnx2 *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6054) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6055) u32 mode_ctl, an_dbg, exp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6056)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6057) if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6058) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6060) bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6061) bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6062)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6063) if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6064) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6065)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6066) bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6067) bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6068) bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6069)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6070) if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6071) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6072)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6073) bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6074) bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6075) bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6077) if (exp & MII_EXPAND_REG1_RUDI_C) /* receiving CONFIG */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6078) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6079)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6080) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6081) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6082)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6083) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6084) bnx2_5706_serdes_timer(struct bnx2 *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6085) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6086) int check_link = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6088) spin_lock(&bp->phy_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6089) if (bp->serdes_an_pending) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6090) bp->serdes_an_pending--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6091) check_link = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6092) } else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6093) u32 bmcr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6094)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6095) bp->current_interval = BNX2_TIMER_INTERVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6097) bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6098)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6099) if (bmcr & BMCR_ANENABLE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6100) if (bnx2_5706_serdes_has_link(bp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6101) bmcr &= ~BMCR_ANENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6102) bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6103) bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6104) bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6105) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6106) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6107) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6108) else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6109) (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6110) u32 phy2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6112) bnx2_write_phy(bp, 0x17, 0x0f01);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6113) bnx2_read_phy(bp, 0x15, &phy2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6114) if (phy2 & 0x20) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6115) u32 bmcr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6117) bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6118) bmcr |= BMCR_ANENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6119) bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6121) bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6122) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6123) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6124) bp->current_interval = BNX2_TIMER_INTERVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6126) if (check_link) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6127) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6129) bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6130) bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6131) bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6133) if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6134) if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6135) bnx2_5706s_force_link_dn(bp, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6136) bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6137) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6138) bnx2_set_link(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6139) } else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6140) bnx2_set_link(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6141) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6142) spin_unlock(&bp->phy_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6143) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6145) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6146) bnx2_5708_serdes_timer(struct bnx2 *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6147) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6148) if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6149) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6150)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6151) if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6152) bp->serdes_an_pending = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6153) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6154) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6156) spin_lock(&bp->phy_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6157) if (bp->serdes_an_pending)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6158) bp->serdes_an_pending--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6159) else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6160) u32 bmcr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6162) bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6163) if (bmcr & BMCR_ANENABLE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6164) bnx2_enable_forced_2g5(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6165) bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6166) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6167) bnx2_disable_forced_2g5(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6168) bp->serdes_an_pending = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6169) bp->current_interval = BNX2_TIMER_INTERVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6170) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6172) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6173) bp->current_interval = BNX2_TIMER_INTERVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6175) spin_unlock(&bp->phy_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6176) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6178) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6179) bnx2_timer(struct timer_list *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6180) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6181) struct bnx2 *bp = from_timer(bp, t, timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6183) if (!netif_running(bp->dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6184) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6186) if (atomic_read(&bp->intr_sem) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6187) goto bnx2_restart_timer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6189) if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6190) BNX2_FLAG_USING_MSI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6191) bnx2_chk_missed_msi(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6193) bnx2_send_heart_beat(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6195) bp->stats_blk->stat_FwRxDrop =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6196) bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6198) /* workaround occasional corrupted counters */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6199) if ((bp->flags & BNX2_FLAG_BROKEN_STATS) && bp->stats_ticks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6200) BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6201) BNX2_HC_COMMAND_STATS_NOW);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6203) if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6204) if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6205) bnx2_5706_serdes_timer(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6206) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6207) bnx2_5708_serdes_timer(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6208) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6210) bnx2_restart_timer:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6211) mod_timer(&bp->timer, jiffies + bp->current_interval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6212) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6213)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6214) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6215) bnx2_request_irq(struct bnx2 *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6216) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6217) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6218) struct bnx2_irq *irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6219) int rc = 0, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6220)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6221) if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6222) flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6223) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6224) flags = IRQF_SHARED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6225)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6226) for (i = 0; i < bp->irq_nvecs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6227) irq = &bp->irq_tbl[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6228) rc = request_irq(irq->vector, irq->handler, flags, irq->name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6229) &bp->bnx2_napi[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6230) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6231) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6232) irq->requested = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6233) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6234) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6235) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6237) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6238) __bnx2_free_irq(struct bnx2 *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6239) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6240) struct bnx2_irq *irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6241) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6243) for (i = 0; i < bp->irq_nvecs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6244) irq = &bp->irq_tbl[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6245) if (irq->requested)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6246) free_irq(irq->vector, &bp->bnx2_napi[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6247) irq->requested = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6248) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6249) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6251) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6252) bnx2_free_irq(struct bnx2 *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6253) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6255) __bnx2_free_irq(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6256) if (bp->flags & BNX2_FLAG_USING_MSI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6257) pci_disable_msi(bp->pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6258) else if (bp->flags & BNX2_FLAG_USING_MSIX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6259) pci_disable_msix(bp->pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6261) bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6262) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6263)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6264) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6265) bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6266) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6267) int i, total_vecs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6268) struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6269) struct net_device *dev = bp->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6270) const int len = sizeof(bp->irq_tbl[0].name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6271)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6272) bnx2_setup_msix_tbl(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6273) BNX2_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6274) BNX2_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6275) BNX2_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6276)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6277) /* Need to flush the previous three writes to ensure MSI-X
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6278) * is setup properly */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6279) BNX2_RD(bp, BNX2_PCI_MSIX_CONTROL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6281) for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6282) msix_ent[i].entry = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6283) msix_ent[i].vector = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6284) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6285)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6286) total_vecs = msix_vecs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6287) #ifdef BCM_CNIC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6288) total_vecs++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6289) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6290) total_vecs = pci_enable_msix_range(bp->pdev, msix_ent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6291) BNX2_MIN_MSIX_VEC, total_vecs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6292) if (total_vecs < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6293) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6294)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6295) msix_vecs = total_vecs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6296) #ifdef BCM_CNIC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6297) msix_vecs--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6298) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6299) bp->irq_nvecs = msix_vecs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6300) bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6301) for (i = 0; i < total_vecs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6302) bp->irq_tbl[i].vector = msix_ent[i].vector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6303) snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6304) bp->irq_tbl[i].handler = bnx2_msi_1shot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6305) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6306) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6307)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6308) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6309) bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6310) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6311) int cpus = netif_get_num_default_rss_queues();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6312) int msix_vecs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6313)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6314) if (!bp->num_req_rx_rings)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6315) msix_vecs = max(cpus + 1, bp->num_req_tx_rings);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6316) else if (!bp->num_req_tx_rings)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6317) msix_vecs = max(cpus, bp->num_req_rx_rings);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6318) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6319) msix_vecs = max(bp->num_req_rx_rings, bp->num_req_tx_rings);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6320)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6321) msix_vecs = min(msix_vecs, RX_MAX_RINGS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6322)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6323) bp->irq_tbl[0].handler = bnx2_interrupt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6324) strcpy(bp->irq_tbl[0].name, bp->dev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6325) bp->irq_nvecs = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6326) bp->irq_tbl[0].vector = bp->pdev->irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6327)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6328) if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6329) bnx2_enable_msix(bp, msix_vecs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6331) if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6332) !(bp->flags & BNX2_FLAG_USING_MSIX)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6333) if (pci_enable_msi(bp->pdev) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6334) bp->flags |= BNX2_FLAG_USING_MSI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6335) if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6336) bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6337) bp->irq_tbl[0].handler = bnx2_msi_1shot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6338) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6339) bp->irq_tbl[0].handler = bnx2_msi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6340)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6341) bp->irq_tbl[0].vector = bp->pdev->irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6342) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6343) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6344)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6345) if (!bp->num_req_tx_rings)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6346) bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6347) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6348) bp->num_tx_rings = min(bp->irq_nvecs, bp->num_req_tx_rings);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6349)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6350) if (!bp->num_req_rx_rings)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6351) bp->num_rx_rings = bp->irq_nvecs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6352) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6353) bp->num_rx_rings = min(bp->irq_nvecs, bp->num_req_rx_rings);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6354)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6355) netif_set_real_num_tx_queues(bp->dev, bp->num_tx_rings);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6356)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6357) return netif_set_real_num_rx_queues(bp->dev, bp->num_rx_rings);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6358) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6359)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6360) /* Called with rtnl_lock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6361) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6362) bnx2_open(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6363) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6364) struct bnx2 *bp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6365) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6366)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6367) rc = bnx2_request_firmware(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6368) if (rc < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6369) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6371) netif_carrier_off(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6372)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6373) bnx2_disable_int(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6374)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6375) rc = bnx2_setup_int_mode(bp, disable_msi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6376) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6377) goto open_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6378) bnx2_init_napi(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6379) bnx2_napi_enable(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6380) rc = bnx2_alloc_mem(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6381) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6382) goto open_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6383)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6384) rc = bnx2_request_irq(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6385) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6386) goto open_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6387)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6388) rc = bnx2_init_nic(bp, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6389) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6390) goto open_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6391)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6392) mod_timer(&bp->timer, jiffies + bp->current_interval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6394) atomic_set(&bp->intr_sem, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6395)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6396) memset(bp->temp_stats_blk, 0, sizeof(struct statistics_block));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6397)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6398) bnx2_enable_int(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6399)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6400) if (bp->flags & BNX2_FLAG_USING_MSI) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6401) /* Test MSI to make sure it is working
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6402) * If MSI test fails, go back to INTx mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6403) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6404) if (bnx2_test_intr(bp) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6405) netdev_warn(bp->dev, "No interrupt was generated using MSI, switching to INTx mode. Please report this failure to the PCI maintainer and include system chipset information.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6406)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6407) bnx2_disable_int(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6408) bnx2_free_irq(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6409)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6410) bnx2_setup_int_mode(bp, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6411)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6412) rc = bnx2_init_nic(bp, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6413)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6414) if (!rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6415) rc = bnx2_request_irq(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6416)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6417) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6418) del_timer_sync(&bp->timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6419) goto open_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6420) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6421) bnx2_enable_int(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6422) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6423) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6424) if (bp->flags & BNX2_FLAG_USING_MSI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6425) netdev_info(dev, "using MSI\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6426) else if (bp->flags & BNX2_FLAG_USING_MSIX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6427) netdev_info(dev, "using MSIX\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6428)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6429) netif_tx_start_all_queues(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6430) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6431) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6432)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6433) open_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6434) bnx2_napi_disable(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6435) bnx2_free_skbs(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6436) bnx2_free_irq(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6437) bnx2_free_mem(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6438) bnx2_del_napi(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6439) bnx2_release_firmware(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6440) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6441) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6442)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6443) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6444) bnx2_reset_task(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6445) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6446) struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6447) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6448) u16 pcicmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6449)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6450) rtnl_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6451) if (!netif_running(bp->dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6452) rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6453) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6454) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6455)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6456) bnx2_netif_stop(bp, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6457)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6458) pci_read_config_word(bp->pdev, PCI_COMMAND, &pcicmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6459) if (!(pcicmd & PCI_COMMAND_MEMORY)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6460) /* in case PCI block has reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6461) pci_restore_state(bp->pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6462) pci_save_state(bp->pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6463) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6464) rc = bnx2_init_nic(bp, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6465) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6466) netdev_err(bp->dev, "failed to reset NIC, closing\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6467) bnx2_napi_enable(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6468) dev_close(bp->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6469) rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6470) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6471) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6472)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6473) atomic_set(&bp->intr_sem, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6474) bnx2_netif_start(bp, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6475) rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6476) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6477)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6478) #define BNX2_FTQ_ENTRY(ftq) { __stringify(ftq##FTQ_CTL), BNX2_##ftq##FTQ_CTL }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6479)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6480) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6481) bnx2_dump_ftq(struct bnx2 *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6482) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6483) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6484) u32 reg, bdidx, cid, valid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6485) struct net_device *dev = bp->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6486) static const struct ftq_reg {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6487) char *name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6488) u32 off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6489) } ftq_arr[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6490) BNX2_FTQ_ENTRY(RV2P_P),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6491) BNX2_FTQ_ENTRY(RV2P_T),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6492) BNX2_FTQ_ENTRY(RV2P_M),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6493) BNX2_FTQ_ENTRY(TBDR_),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6494) BNX2_FTQ_ENTRY(TDMA_),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6495) BNX2_FTQ_ENTRY(TXP_),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6496) BNX2_FTQ_ENTRY(TXP_),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6497) BNX2_FTQ_ENTRY(TPAT_),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6498) BNX2_FTQ_ENTRY(RXP_C),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6499) BNX2_FTQ_ENTRY(RXP_),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6500) BNX2_FTQ_ENTRY(COM_COMXQ_),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6501) BNX2_FTQ_ENTRY(COM_COMTQ_),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6502) BNX2_FTQ_ENTRY(COM_COMQ_),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6503) BNX2_FTQ_ENTRY(CP_CPQ_),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6504) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6505)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6506) netdev_err(dev, "<--- start FTQ dump --->\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6507) for (i = 0; i < ARRAY_SIZE(ftq_arr); i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6508) netdev_err(dev, "%s %08x\n", ftq_arr[i].name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6509) bnx2_reg_rd_ind(bp, ftq_arr[i].off));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6510)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6511) netdev_err(dev, "CPU states:\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6512) for (reg = BNX2_TXP_CPU_MODE; reg <= BNX2_CP_CPU_MODE; reg += 0x40000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6513) netdev_err(dev, "%06x mode %x state %x evt_mask %x pc %x pc %x instr %x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6514) reg, bnx2_reg_rd_ind(bp, reg),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6515) bnx2_reg_rd_ind(bp, reg + 4),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6516) bnx2_reg_rd_ind(bp, reg + 8),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6517) bnx2_reg_rd_ind(bp, reg + 0x1c),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6518) bnx2_reg_rd_ind(bp, reg + 0x1c),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6519) bnx2_reg_rd_ind(bp, reg + 0x20));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6520)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6521) netdev_err(dev, "<--- end FTQ dump --->\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6522) netdev_err(dev, "<--- start TBDC dump --->\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6523) netdev_err(dev, "TBDC free cnt: %ld\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6524) BNX2_RD(bp, BNX2_TBDC_STATUS) & BNX2_TBDC_STATUS_FREE_CNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6525) netdev_err(dev, "LINE CID BIDX CMD VALIDS\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6526) for (i = 0; i < 0x20; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6527) int j = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6528)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6529) BNX2_WR(bp, BNX2_TBDC_BD_ADDR, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6530) BNX2_WR(bp, BNX2_TBDC_CAM_OPCODE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6531) BNX2_TBDC_CAM_OPCODE_OPCODE_CAM_READ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6532) BNX2_WR(bp, BNX2_TBDC_COMMAND, BNX2_TBDC_COMMAND_CMD_REG_ARB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6533) while ((BNX2_RD(bp, BNX2_TBDC_COMMAND) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6534) BNX2_TBDC_COMMAND_CMD_REG_ARB) && j < 100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6535) j++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6536)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6537) cid = BNX2_RD(bp, BNX2_TBDC_CID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6538) bdidx = BNX2_RD(bp, BNX2_TBDC_BIDX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6539) valid = BNX2_RD(bp, BNX2_TBDC_CAM_OPCODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6540) netdev_err(dev, "%02x %06x %04lx %02x [%x]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6541) i, cid, bdidx & BNX2_TBDC_BDIDX_BDIDX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6542) bdidx >> 24, (valid >> 8) & 0x0ff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6543) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6544) netdev_err(dev, "<--- end TBDC dump --->\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6545) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6546)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6547) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6548) bnx2_dump_state(struct bnx2 *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6549) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6550) struct net_device *dev = bp->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6551) u32 val1, val2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6552)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6553) pci_read_config_dword(bp->pdev, PCI_COMMAND, &val1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6554) netdev_err(dev, "DEBUG: intr_sem[%x] PCI_CMD[%08x]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6555) atomic_read(&bp->intr_sem), val1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6556) pci_read_config_dword(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &val1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6557) pci_read_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, &val2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6558) netdev_err(dev, "DEBUG: PCI_PM[%08x] PCI_MISC_CFG[%08x]\n", val1, val2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6559) netdev_err(dev, "DEBUG: EMAC_TX_STATUS[%08x] EMAC_RX_STATUS[%08x]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6560) BNX2_RD(bp, BNX2_EMAC_TX_STATUS),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6561) BNX2_RD(bp, BNX2_EMAC_RX_STATUS));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6562) netdev_err(dev, "DEBUG: RPM_MGMT_PKT_CTRL[%08x]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6563) BNX2_RD(bp, BNX2_RPM_MGMT_PKT_CTRL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6564) netdev_err(dev, "DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6565) BNX2_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6566) if (bp->flags & BNX2_FLAG_USING_MSIX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6567) netdev_err(dev, "DEBUG: PBA[%08x]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6568) BNX2_RD(bp, BNX2_PCI_GRC_WINDOW3_BASE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6569) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6570)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6571) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6572) bnx2_tx_timeout(struct net_device *dev, unsigned int txqueue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6573) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6574) struct bnx2 *bp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6575)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6576) bnx2_dump_ftq(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6577) bnx2_dump_state(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6578) bnx2_dump_mcp_state(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6579)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6580) /* This allows the netif to be shutdown gracefully before resetting */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6581) schedule_work(&bp->reset_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6582) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6583)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6584) /* Called with netif_tx_lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6585) * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6586) * netif_wake_queue().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6587) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6588) static netdev_tx_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6589) bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6590) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6591) struct bnx2 *bp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6592) dma_addr_t mapping;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6593) struct bnx2_tx_bd *txbd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6594) struct bnx2_sw_tx_bd *tx_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6595) u32 len, vlan_tag_flags, last_frag, mss;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6596) u16 prod, ring_prod;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6597) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6598) struct bnx2_napi *bnapi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6599) struct bnx2_tx_ring_info *txr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6600) struct netdev_queue *txq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6601)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6602) /* Determine which tx ring we will be placed on */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6603) i = skb_get_queue_mapping(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6604) bnapi = &bp->bnx2_napi[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6605) txr = &bnapi->tx_ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6606) txq = netdev_get_tx_queue(dev, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6607)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6608) if (unlikely(bnx2_tx_avail(bp, txr) <
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6609) (skb_shinfo(skb)->nr_frags + 1))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6610) netif_tx_stop_queue(txq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6611) netdev_err(dev, "BUG! Tx ring full when queue awake!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6612)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6613) return NETDEV_TX_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6614) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6615) len = skb_headlen(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6616) prod = txr->tx_prod;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6617) ring_prod = BNX2_TX_RING_IDX(prod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6618)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6619) vlan_tag_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6620) if (skb->ip_summed == CHECKSUM_PARTIAL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6621) vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6622) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6623)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6624) if (skb_vlan_tag_present(skb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6625) vlan_tag_flags |=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6626) (TX_BD_FLAGS_VLAN_TAG | (skb_vlan_tag_get(skb) << 16));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6627) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6628)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6629) if ((mss = skb_shinfo(skb)->gso_size)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6630) u32 tcp_opt_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6631) struct iphdr *iph;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6632)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6633) vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6634)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6635) tcp_opt_len = tcp_optlen(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6636)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6637) if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6638) u32 tcp_off = skb_transport_offset(skb) -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6639) sizeof(struct ipv6hdr) - ETH_HLEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6640)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6641) vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6642) TX_BD_FLAGS_SW_FLAGS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6643) if (likely(tcp_off == 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6644) vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6645) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6646) tcp_off >>= 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6647) vlan_tag_flags |= ((tcp_off & 0x3) <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6648) TX_BD_FLAGS_TCP6_OFF0_SHL) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6649) ((tcp_off & 0x10) <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6650) TX_BD_FLAGS_TCP6_OFF4_SHL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6651) mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6652) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6653) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6654) iph = ip_hdr(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6655) if (tcp_opt_len || (iph->ihl > 5)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6656) vlan_tag_flags |= ((iph->ihl - 5) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6657) (tcp_opt_len >> 2)) << 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6658) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6659) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6660) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6661) mss = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6662)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6663) mapping = dma_map_single(&bp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6664) if (dma_mapping_error(&bp->pdev->dev, mapping)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6665) dev_kfree_skb_any(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6666) return NETDEV_TX_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6667) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6668)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6669) tx_buf = &txr->tx_buf_ring[ring_prod];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6670) tx_buf->skb = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6671) dma_unmap_addr_set(tx_buf, mapping, mapping);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6672)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6673) txbd = &txr->tx_desc_ring[ring_prod];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6674)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6675) txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6676) txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6677) txbd->tx_bd_mss_nbytes = len | (mss << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6678) txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6679)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6680) last_frag = skb_shinfo(skb)->nr_frags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6681) tx_buf->nr_frags = last_frag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6682) tx_buf->is_gso = skb_is_gso(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6683)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6684) for (i = 0; i < last_frag; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6685) const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6686)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6687) prod = BNX2_NEXT_TX_BD(prod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6688) ring_prod = BNX2_TX_RING_IDX(prod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6689) txbd = &txr->tx_desc_ring[ring_prod];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6690)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6691) len = skb_frag_size(frag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6692) mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0, len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6693) DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6694) if (dma_mapping_error(&bp->pdev->dev, mapping))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6695) goto dma_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6696) dma_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6697) mapping);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6698)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6699) txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6700) txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6701) txbd->tx_bd_mss_nbytes = len | (mss << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6702) txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6703)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6704) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6705) txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6706)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6707) /* Sync BD data before updating TX mailbox */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6708) wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6709)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6710) netdev_tx_sent_queue(txq, skb->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6711)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6712) prod = BNX2_NEXT_TX_BD(prod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6713) txr->tx_prod_bseq += skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6714)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6715) BNX2_WR16(bp, txr->tx_bidx_addr, prod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6716) BNX2_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6717)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6718) txr->tx_prod = prod;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6719)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6720) if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6721) netif_tx_stop_queue(txq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6722)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6723) /* netif_tx_stop_queue() must be done before checking
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6724) * tx index in bnx2_tx_avail() below, because in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6725) * bnx2_tx_int(), we update tx index before checking for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6726) * netif_tx_queue_stopped().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6727) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6728) smp_mb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6729) if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6730) netif_tx_wake_queue(txq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6731) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6732)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6733) return NETDEV_TX_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6734) dma_error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6735) /* save value of frag that failed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6736) last_frag = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6737)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6738) /* start back at beginning and unmap skb */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6739) prod = txr->tx_prod;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6740) ring_prod = BNX2_TX_RING_IDX(prod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6741) tx_buf = &txr->tx_buf_ring[ring_prod];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6742) tx_buf->skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6743) dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6744) skb_headlen(skb), PCI_DMA_TODEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6745)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6746) /* unmap remaining mapped pages */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6747) for (i = 0; i < last_frag; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6748) prod = BNX2_NEXT_TX_BD(prod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6749) ring_prod = BNX2_TX_RING_IDX(prod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6750) tx_buf = &txr->tx_buf_ring[ring_prod];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6751) dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6752) skb_frag_size(&skb_shinfo(skb)->frags[i]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6753) PCI_DMA_TODEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6754) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6755)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6756) dev_kfree_skb_any(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6757) return NETDEV_TX_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6758) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6759)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6760) /* Called with rtnl_lock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6761) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6762) bnx2_close(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6763) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6764) struct bnx2 *bp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6765)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6766) bnx2_disable_int_sync(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6767) bnx2_napi_disable(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6768) netif_tx_disable(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6769) del_timer_sync(&bp->timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6770) bnx2_shutdown_chip(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6771) bnx2_free_irq(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6772) bnx2_free_skbs(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6773) bnx2_free_mem(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6774) bnx2_del_napi(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6775) bp->link_up = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6776) netif_carrier_off(bp->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6777) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6778) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6779)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6780) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6781) bnx2_save_stats(struct bnx2 *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6782) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6783) u32 *hw_stats = (u32 *) bp->stats_blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6784) u32 *temp_stats = (u32 *) bp->temp_stats_blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6785) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6786)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6787) /* The 1st 10 counters are 64-bit counters */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6788) for (i = 0; i < 20; i += 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6789) u32 hi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6790) u64 lo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6791)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6792) hi = temp_stats[i] + hw_stats[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6793) lo = (u64) temp_stats[i + 1] + (u64) hw_stats[i + 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6794) if (lo > 0xffffffff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6795) hi++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6796) temp_stats[i] = hi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6797) temp_stats[i + 1] = lo & 0xffffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6798) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6800) for ( ; i < sizeof(struct statistics_block) / 4; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6801) temp_stats[i] += hw_stats[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6802) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6803)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6804) #define GET_64BIT_NET_STATS64(ctr) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6805) (((u64) (ctr##_hi) << 32) + (u64) (ctr##_lo))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6806)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6807) #define GET_64BIT_NET_STATS(ctr) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6808) GET_64BIT_NET_STATS64(bp->stats_blk->ctr) + \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6809) GET_64BIT_NET_STATS64(bp->temp_stats_blk->ctr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6810)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6811) #define GET_32BIT_NET_STATS(ctr) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6812) (unsigned long) (bp->stats_blk->ctr + \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6813) bp->temp_stats_blk->ctr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6815) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6816) bnx2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6817) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6818) struct bnx2 *bp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6820) if (!bp->stats_blk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6821) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6823) net_stats->rx_packets =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6824) GET_64BIT_NET_STATS(stat_IfHCInUcastPkts) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6825) GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6826) GET_64BIT_NET_STATS(stat_IfHCInBroadcastPkts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6828) net_stats->tx_packets =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6829) GET_64BIT_NET_STATS(stat_IfHCOutUcastPkts) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6830) GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6831) GET_64BIT_NET_STATS(stat_IfHCOutBroadcastPkts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6832)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6833) net_stats->rx_bytes =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6834) GET_64BIT_NET_STATS(stat_IfHCInOctets);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6835)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6836) net_stats->tx_bytes =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6837) GET_64BIT_NET_STATS(stat_IfHCOutOctets);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6839) net_stats->multicast =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6840) GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6842) net_stats->collisions =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6843) GET_32BIT_NET_STATS(stat_EtherStatsCollisions);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6844)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6845) net_stats->rx_length_errors =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6846) GET_32BIT_NET_STATS(stat_EtherStatsUndersizePkts) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6847) GET_32BIT_NET_STATS(stat_EtherStatsOverrsizePkts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6848)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6849) net_stats->rx_over_errors =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6850) GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6851) GET_32BIT_NET_STATS(stat_IfInMBUFDiscards);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6852)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6853) net_stats->rx_frame_errors =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6854) GET_32BIT_NET_STATS(stat_Dot3StatsAlignmentErrors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6856) net_stats->rx_crc_errors =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6857) GET_32BIT_NET_STATS(stat_Dot3StatsFCSErrors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6858)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6859) net_stats->rx_errors = net_stats->rx_length_errors +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6860) net_stats->rx_over_errors + net_stats->rx_frame_errors +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6861) net_stats->rx_crc_errors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6863) net_stats->tx_aborted_errors =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6864) GET_32BIT_NET_STATS(stat_Dot3StatsExcessiveCollisions) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6865) GET_32BIT_NET_STATS(stat_Dot3StatsLateCollisions);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6867) if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6868) (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6869) net_stats->tx_carrier_errors = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6870) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6871) net_stats->tx_carrier_errors =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6872) GET_32BIT_NET_STATS(stat_Dot3StatsCarrierSenseErrors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6873) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6875) net_stats->tx_errors =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6876) GET_32BIT_NET_STATS(stat_emac_tx_stat_dot3statsinternalmactransmiterrors) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6877) net_stats->tx_aborted_errors +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6878) net_stats->tx_carrier_errors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6879)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6880) net_stats->rx_missed_errors =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6881) GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6882) GET_32BIT_NET_STATS(stat_IfInMBUFDiscards) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6883) GET_32BIT_NET_STATS(stat_FwRxDrop);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6885) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6887) /* All ethtool functions called with rtnl_lock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6888)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6889) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6890) bnx2_get_link_ksettings(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6891) struct ethtool_link_ksettings *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6892) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6893) struct bnx2 *bp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6894) int support_serdes = 0, support_copper = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6895) u32 supported, advertising;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6897) supported = SUPPORTED_Autoneg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6898) if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6899) support_serdes = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6900) support_copper = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6901) } else if (bp->phy_port == PORT_FIBRE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6902) support_serdes = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6903) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6904) support_copper = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6906) if (support_serdes) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6907) supported |= SUPPORTED_1000baseT_Full |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6908) SUPPORTED_FIBRE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6909) if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6910) supported |= SUPPORTED_2500baseX_Full;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6911) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6912) if (support_copper) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6913) supported |= SUPPORTED_10baseT_Half |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6914) SUPPORTED_10baseT_Full |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6915) SUPPORTED_100baseT_Half |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6916) SUPPORTED_100baseT_Full |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6917) SUPPORTED_1000baseT_Full |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6918) SUPPORTED_TP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6919) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6921) spin_lock_bh(&bp->phy_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6922) cmd->base.port = bp->phy_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6923) advertising = bp->advertising;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6925) if (bp->autoneg & AUTONEG_SPEED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6926) cmd->base.autoneg = AUTONEG_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6927) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6928) cmd->base.autoneg = AUTONEG_DISABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6929) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6930)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6931) if (netif_carrier_ok(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6932) cmd->base.speed = bp->line_speed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6933) cmd->base.duplex = bp->duplex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6934) if (!(bp->phy_flags & BNX2_PHY_FLAG_SERDES)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6935) if (bp->phy_flags & BNX2_PHY_FLAG_MDIX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6936) cmd->base.eth_tp_mdix = ETH_TP_MDI_X;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6937) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6938) cmd->base.eth_tp_mdix = ETH_TP_MDI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6939) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6940) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6941) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6942) cmd->base.speed = SPEED_UNKNOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6943) cmd->base.duplex = DUPLEX_UNKNOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6944) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6945) spin_unlock_bh(&bp->phy_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6947) cmd->base.phy_address = bp->phy_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6949) ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6950) supported);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6951) ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6952) advertising);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6954) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6955) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6956)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6957) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6958) bnx2_set_link_ksettings(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6959) const struct ethtool_link_ksettings *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6960) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6961) struct bnx2 *bp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6962) u8 autoneg = bp->autoneg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6963) u8 req_duplex = bp->req_duplex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6964) u16 req_line_speed = bp->req_line_speed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6965) u32 advertising = bp->advertising;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6966) int err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6967)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6968) spin_lock_bh(&bp->phy_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6969)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6970) if (cmd->base.port != PORT_TP && cmd->base.port != PORT_FIBRE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6971) goto err_out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6973) if (cmd->base.port != bp->phy_port &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6974) !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6975) goto err_out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6977) /* If device is down, we can store the settings only if the user
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6978) * is setting the currently active port.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6979) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6980) if (!netif_running(dev) && cmd->base.port != bp->phy_port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6981) goto err_out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6983) if (cmd->base.autoneg == AUTONEG_ENABLE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6984) autoneg |= AUTONEG_SPEED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6985)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6986) ethtool_convert_link_mode_to_legacy_u32(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6987) &advertising, cmd->link_modes.advertising);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6988)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6989) if (cmd->base.port == PORT_TP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6990) advertising &= ETHTOOL_ALL_COPPER_SPEED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6991) if (!advertising)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6992) advertising = ETHTOOL_ALL_COPPER_SPEED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6993) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6994) advertising &= ETHTOOL_ALL_FIBRE_SPEED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6995) if (!advertising)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6996) advertising = ETHTOOL_ALL_FIBRE_SPEED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6997) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6998) advertising |= ADVERTISED_Autoneg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6999) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7000) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7001) u32 speed = cmd->base.speed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7003) if (cmd->base.port == PORT_FIBRE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7004) if ((speed != SPEED_1000 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7005) speed != SPEED_2500) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7006) (cmd->base.duplex != DUPLEX_FULL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7007) goto err_out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7009) if (speed == SPEED_2500 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7010) !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7011) goto err_out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7012) } else if (speed == SPEED_1000 || speed == SPEED_2500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7013) goto err_out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7015) autoneg &= ~AUTONEG_SPEED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7016) req_line_speed = speed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7017) req_duplex = cmd->base.duplex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7018) advertising = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7019) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7021) bp->autoneg = autoneg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7022) bp->advertising = advertising;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7023) bp->req_line_speed = req_line_speed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7024) bp->req_duplex = req_duplex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7025)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7026) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7027) /* If device is down, the new settings will be picked up when it is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7028) * brought up.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7029) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7030) if (netif_running(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7031) err = bnx2_setup_phy(bp, cmd->base.port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7032)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7033) err_out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7034) spin_unlock_bh(&bp->phy_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7036) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7037) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7038)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7039) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7040) bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7041) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7042) struct bnx2 *bp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7044) strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7045) strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7046) strlcpy(info->fw_version, bp->fw_version, sizeof(info->fw_version));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7047) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7048)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7049) #define BNX2_REGDUMP_LEN (32 * 1024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7050)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7051) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7052) bnx2_get_regs_len(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7053) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7054) return BNX2_REGDUMP_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7055) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7056)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7057) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7058) bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7059) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7060) u32 *p = _p, i, offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7061) u8 *orig_p = _p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7062) struct bnx2 *bp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7063) static const u32 reg_boundaries[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7064) 0x0000, 0x0098, 0x0400, 0x045c,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7065) 0x0800, 0x0880, 0x0c00, 0x0c10,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7066) 0x0c30, 0x0d08, 0x1000, 0x101c,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7067) 0x1040, 0x1048, 0x1080, 0x10a4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7068) 0x1400, 0x1490, 0x1498, 0x14f0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7069) 0x1500, 0x155c, 0x1580, 0x15dc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7070) 0x1600, 0x1658, 0x1680, 0x16d8,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7071) 0x1800, 0x1820, 0x1840, 0x1854,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7072) 0x1880, 0x1894, 0x1900, 0x1984,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7073) 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7074) 0x1c80, 0x1c94, 0x1d00, 0x1d84,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7075) 0x2000, 0x2030, 0x23c0, 0x2400,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7076) 0x2800, 0x2820, 0x2830, 0x2850,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7077) 0x2b40, 0x2c10, 0x2fc0, 0x3058,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7078) 0x3c00, 0x3c94, 0x4000, 0x4010,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7079) 0x4080, 0x4090, 0x43c0, 0x4458,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7080) 0x4c00, 0x4c18, 0x4c40, 0x4c54,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7081) 0x4fc0, 0x5010, 0x53c0, 0x5444,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7082) 0x5c00, 0x5c18, 0x5c80, 0x5c90,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7083) 0x5fc0, 0x6000, 0x6400, 0x6428,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7084) 0x6800, 0x6848, 0x684c, 0x6860,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7085) 0x6888, 0x6910, 0x8000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7086) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7088) regs->version = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7089)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7090) memset(p, 0, BNX2_REGDUMP_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7091)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7092) if (!netif_running(bp->dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7093) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7094)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7095) i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7096) offset = reg_boundaries[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7097) p += offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7098) while (offset < BNX2_REGDUMP_LEN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7099) *p++ = BNX2_RD(bp, offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7100) offset += 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7101) if (offset == reg_boundaries[i + 1]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7102) offset = reg_boundaries[i + 2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7103) p = (u32 *) (orig_p + offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7104) i += 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7105) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7106) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7107) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7109) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7110) bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7111) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7112) struct bnx2 *bp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7114) if (bp->flags & BNX2_FLAG_NO_WOL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7115) wol->supported = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7116) wol->wolopts = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7117) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7118) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7119) wol->supported = WAKE_MAGIC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7120) if (bp->wol)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7121) wol->wolopts = WAKE_MAGIC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7122) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7123) wol->wolopts = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7124) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7125) memset(&wol->sopass, 0, sizeof(wol->sopass));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7126) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7128) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7129) bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7130) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7131) struct bnx2 *bp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7133) if (wol->wolopts & ~WAKE_MAGIC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7134) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7136) if (wol->wolopts & WAKE_MAGIC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7137) if (bp->flags & BNX2_FLAG_NO_WOL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7138) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7140) bp->wol = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7141) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7142) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7143) bp->wol = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7144) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7146) device_set_wakeup_enable(&bp->pdev->dev, bp->wol);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7147)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7148) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7149) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7150)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7151) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7152) bnx2_nway_reset(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7153) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7154) struct bnx2 *bp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7155) u32 bmcr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7157) if (!netif_running(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7158) return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7160) if (!(bp->autoneg & AUTONEG_SPEED)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7161) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7162) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7164) spin_lock_bh(&bp->phy_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7165)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7166) if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7167) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7169) rc = bnx2_setup_remote_phy(bp, bp->phy_port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7170) spin_unlock_bh(&bp->phy_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7171) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7172) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7173)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7174) /* Force a link down visible on the other side */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7175) if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7176) bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7177) spin_unlock_bh(&bp->phy_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7179) msleep(20);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7180)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7181) spin_lock_bh(&bp->phy_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7183) bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7184) bp->serdes_an_pending = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7185) mod_timer(&bp->timer, jiffies + bp->current_interval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7186) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7188) bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7189) bmcr &= ~BMCR_LOOPBACK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7190) bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7192) spin_unlock_bh(&bp->phy_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7193)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7194) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7195) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7197) static u32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7198) bnx2_get_link(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7199) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7200) struct bnx2 *bp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7202) return bp->link_up;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7203) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7204)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7205) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7206) bnx2_get_eeprom_len(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7207) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7208) struct bnx2 *bp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7210) if (!bp->flash_info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7211) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7213) return (int) bp->flash_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7214) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7216) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7217) bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7218) u8 *eebuf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7219) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7220) struct bnx2 *bp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7221) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7222)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7223) /* parameters already validated in ethtool_get_eeprom */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7224)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7225) rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7226)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7227) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7228) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7229)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7230) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7231) bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7232) u8 *eebuf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7233) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7234) struct bnx2 *bp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7235) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7237) /* parameters already validated in ethtool_set_eeprom */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7239) rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7241) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7242) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7243)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7244) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7245) bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7246) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7247) struct bnx2 *bp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7249) memset(coal, 0, sizeof(struct ethtool_coalesce));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7251) coal->rx_coalesce_usecs = bp->rx_ticks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7252) coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7253) coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7254) coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7256) coal->tx_coalesce_usecs = bp->tx_ticks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7257) coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7258) coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7259) coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7261) coal->stats_block_coalesce_usecs = bp->stats_ticks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7263) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7264) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7265)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7266) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7267) bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7268) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7269) struct bnx2 *bp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7270)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7271) bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7272) if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7274) bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7275) if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7276)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7277) bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7278) if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7279)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7280) bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7281) if (bp->rx_quick_cons_trip_int > 0xff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7282) bp->rx_quick_cons_trip_int = 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7283)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7284) bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7285) if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7287) bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7288) if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7290) bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7291) if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7293) bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7294) if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7295) 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7296)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7297) bp->stats_ticks = coal->stats_block_coalesce_usecs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7298) if (bp->flags & BNX2_FLAG_BROKEN_STATS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7299) if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7300) bp->stats_ticks = USEC_PER_SEC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7301) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7302) if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7303) bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7304) bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7305)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7306) if (netif_running(bp->dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7307) bnx2_netif_stop(bp, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7308) bnx2_init_nic(bp, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7309) bnx2_netif_start(bp, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7310) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7312) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7313) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7314)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7315) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7316) bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7317) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7318) struct bnx2 *bp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7319)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7320) ering->rx_max_pending = BNX2_MAX_TOTAL_RX_DESC_CNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7321) ering->rx_jumbo_max_pending = BNX2_MAX_TOTAL_RX_PG_DESC_CNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7322)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7323) ering->rx_pending = bp->rx_ring_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7324) ering->rx_jumbo_pending = bp->rx_pg_ring_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7325)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7326) ering->tx_max_pending = BNX2_MAX_TX_DESC_CNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7327) ering->tx_pending = bp->tx_ring_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7328) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7329)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7330) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7331) bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx, bool reset_irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7332) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7333) if (netif_running(bp->dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7334) /* Reset will erase chipset stats; save them */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7335) bnx2_save_stats(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7336)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7337) bnx2_netif_stop(bp, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7338) bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7339) if (reset_irq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7340) bnx2_free_irq(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7341) bnx2_del_napi(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7342) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7343) __bnx2_free_irq(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7344) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7345) bnx2_free_skbs(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7346) bnx2_free_mem(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7347) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7348)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7349) bnx2_set_rx_ring_size(bp, rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7350) bp->tx_ring_size = tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7351)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7352) if (netif_running(bp->dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7353) int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7354)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7355) if (reset_irq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7356) rc = bnx2_setup_int_mode(bp, disable_msi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7357) bnx2_init_napi(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7358) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7359)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7360) if (!rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7361) rc = bnx2_alloc_mem(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7362)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7363) if (!rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7364) rc = bnx2_request_irq(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7365)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7366) if (!rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7367) rc = bnx2_init_nic(bp, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7368)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7369) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7370) bnx2_napi_enable(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7371) dev_close(bp->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7372) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7373) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7374) #ifdef BCM_CNIC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7375) mutex_lock(&bp->cnic_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7376) /* Let cnic know about the new status block. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7377) if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7378) bnx2_setup_cnic_irq_info(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7379) mutex_unlock(&bp->cnic_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7380) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7381) bnx2_netif_start(bp, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7382) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7383) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7384) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7385)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7386) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7387) bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7388) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7389) struct bnx2 *bp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7390) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7391)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7392) if ((ering->rx_pending > BNX2_MAX_TOTAL_RX_DESC_CNT) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7393) (ering->tx_pending > BNX2_MAX_TX_DESC_CNT) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7394) (ering->tx_pending <= MAX_SKB_FRAGS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7395)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7396) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7397) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7398) rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7399) false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7400) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7401) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7402)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7403) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7404) bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7405) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7406) struct bnx2 *bp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7407)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7408) epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7409) epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7410) epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7411) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7412)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7413) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7414) bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7415) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7416) struct bnx2 *bp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7418) bp->req_flow_ctrl = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7419) if (epause->rx_pause)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7420) bp->req_flow_ctrl |= FLOW_CTRL_RX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7421) if (epause->tx_pause)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7422) bp->req_flow_ctrl |= FLOW_CTRL_TX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7423)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7424) if (epause->autoneg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7425) bp->autoneg |= AUTONEG_FLOW_CTRL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7426) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7427) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7428) bp->autoneg &= ~AUTONEG_FLOW_CTRL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7429) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7430)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7431) if (netif_running(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7432) spin_lock_bh(&bp->phy_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7433) bnx2_setup_phy(bp, bp->phy_port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7434) spin_unlock_bh(&bp->phy_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7435) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7436)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7437) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7438) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7439)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7440) static struct {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7441) char string[ETH_GSTRING_LEN];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7442) } bnx2_stats_str_arr[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7443) { "rx_bytes" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7444) { "rx_error_bytes" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7445) { "tx_bytes" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7446) { "tx_error_bytes" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7447) { "rx_ucast_packets" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7448) { "rx_mcast_packets" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7449) { "rx_bcast_packets" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7450) { "tx_ucast_packets" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7451) { "tx_mcast_packets" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7452) { "tx_bcast_packets" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7453) { "tx_mac_errors" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7454) { "tx_carrier_errors" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7455) { "rx_crc_errors" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7456) { "rx_align_errors" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7457) { "tx_single_collisions" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7458) { "tx_multi_collisions" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7459) { "tx_deferred" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7460) { "tx_excess_collisions" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7461) { "tx_late_collisions" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7462) { "tx_total_collisions" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7463) { "rx_fragments" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7464) { "rx_jabbers" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7465) { "rx_undersize_packets" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7466) { "rx_oversize_packets" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7467) { "rx_64_byte_packets" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7468) { "rx_65_to_127_byte_packets" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7469) { "rx_128_to_255_byte_packets" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7470) { "rx_256_to_511_byte_packets" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7471) { "rx_512_to_1023_byte_packets" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7472) { "rx_1024_to_1522_byte_packets" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7473) { "rx_1523_to_9022_byte_packets" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7474) { "tx_64_byte_packets" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7475) { "tx_65_to_127_byte_packets" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7476) { "tx_128_to_255_byte_packets" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7477) { "tx_256_to_511_byte_packets" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7478) { "tx_512_to_1023_byte_packets" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7479) { "tx_1024_to_1522_byte_packets" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7480) { "tx_1523_to_9022_byte_packets" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7481) { "rx_xon_frames" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7482) { "rx_xoff_frames" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7483) { "tx_xon_frames" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7484) { "tx_xoff_frames" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7485) { "rx_mac_ctrl_frames" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7486) { "rx_filtered_packets" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7487) { "rx_ftq_discards" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7488) { "rx_discards" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7489) { "rx_fw_discards" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7490) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7491)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7492) #define BNX2_NUM_STATS ARRAY_SIZE(bnx2_stats_str_arr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7493)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7494) #define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7495)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7496) static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7497) STATS_OFFSET32(stat_IfHCInOctets_hi),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7498) STATS_OFFSET32(stat_IfHCInBadOctets_hi),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7499) STATS_OFFSET32(stat_IfHCOutOctets_hi),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7500) STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7501) STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7502) STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7503) STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7504) STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7505) STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7506) STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7507) STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7508) STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7509) STATS_OFFSET32(stat_Dot3StatsFCSErrors),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7510) STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7511) STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7512) STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7513) STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7514) STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7515) STATS_OFFSET32(stat_Dot3StatsLateCollisions),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7516) STATS_OFFSET32(stat_EtherStatsCollisions),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7517) STATS_OFFSET32(stat_EtherStatsFragments),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7518) STATS_OFFSET32(stat_EtherStatsJabbers),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7519) STATS_OFFSET32(stat_EtherStatsUndersizePkts),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7520) STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7521) STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7522) STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7523) STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7524) STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7525) STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7526) STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7527) STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7528) STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7529) STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7530) STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7531) STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7532) STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7533) STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7534) STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7535) STATS_OFFSET32(stat_XonPauseFramesReceived),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7536) STATS_OFFSET32(stat_XoffPauseFramesReceived),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7537) STATS_OFFSET32(stat_OutXonSent),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7538) STATS_OFFSET32(stat_OutXoffSent),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7539) STATS_OFFSET32(stat_MacControlFramesReceived),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7540) STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7541) STATS_OFFSET32(stat_IfInFTQDiscards),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7542) STATS_OFFSET32(stat_IfInMBUFDiscards),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7543) STATS_OFFSET32(stat_FwRxDrop),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7544) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7545)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7546) /* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7547) * skipped because of errata.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7548) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7549) static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7550) 8,0,8,8,8,8,8,8,8,8,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7551) 4,0,4,4,4,4,4,4,4,4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7552) 4,4,4,4,4,4,4,4,4,4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7553) 4,4,4,4,4,4,4,4,4,4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7554) 4,4,4,4,4,4,4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7555) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7556)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7557) static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7558) 8,0,8,8,8,8,8,8,8,8,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7559) 4,4,4,4,4,4,4,4,4,4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7560) 4,4,4,4,4,4,4,4,4,4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7561) 4,4,4,4,4,4,4,4,4,4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7562) 4,4,4,4,4,4,4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7563) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7564)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7565) #define BNX2_NUM_TESTS 6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7566)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7567) static struct {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7568) char string[ETH_GSTRING_LEN];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7569) } bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7570) { "register_test (offline)" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7571) { "memory_test (offline)" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7572) { "loopback_test (offline)" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7573) { "nvram_test (online)" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7574) { "interrupt_test (online)" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7575) { "link_test (online)" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7576) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7577)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7578) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7579) bnx2_get_sset_count(struct net_device *dev, int sset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7580) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7581) switch (sset) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7582) case ETH_SS_TEST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7583) return BNX2_NUM_TESTS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7584) case ETH_SS_STATS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7585) return BNX2_NUM_STATS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7586) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7587) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7588) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7589) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7590)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7591) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7592) bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7593) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7594) struct bnx2 *bp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7595)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7596) memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7597) if (etest->flags & ETH_TEST_FL_OFFLINE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7598) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7599)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7600) bnx2_netif_stop(bp, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7601) bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7602) bnx2_free_skbs(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7603)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7604) if (bnx2_test_registers(bp) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7605) buf[0] = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7606) etest->flags |= ETH_TEST_FL_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7607) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7608) if (bnx2_test_memory(bp) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7609) buf[1] = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7610) etest->flags |= ETH_TEST_FL_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7611) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7612) if ((buf[2] = bnx2_test_loopback(bp)) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7613) etest->flags |= ETH_TEST_FL_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7614)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7615) if (!netif_running(bp->dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7616) bnx2_shutdown_chip(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7617) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7618) bnx2_init_nic(bp, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7619) bnx2_netif_start(bp, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7620) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7621)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7622) /* wait for link up */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7623) for (i = 0; i < 7; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7624) if (bp->link_up)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7625) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7626) msleep_interruptible(1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7627) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7628) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7629)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7630) if (bnx2_test_nvram(bp) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7631) buf[3] = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7632) etest->flags |= ETH_TEST_FL_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7633) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7634) if (bnx2_test_intr(bp) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7635) buf[4] = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7636) etest->flags |= ETH_TEST_FL_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7637) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7638)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7639) if (bnx2_test_link(bp) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7640) buf[5] = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7641) etest->flags |= ETH_TEST_FL_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7642)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7643) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7644) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7645)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7646) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7647) bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7648) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7649) switch (stringset) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7650) case ETH_SS_STATS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7651) memcpy(buf, bnx2_stats_str_arr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7652) sizeof(bnx2_stats_str_arr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7653) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7654) case ETH_SS_TEST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7655) memcpy(buf, bnx2_tests_str_arr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7656) sizeof(bnx2_tests_str_arr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7657) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7658) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7659) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7660)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7661) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7662) bnx2_get_ethtool_stats(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7663) struct ethtool_stats *stats, u64 *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7664) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7665) struct bnx2 *bp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7666) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7667) u32 *hw_stats = (u32 *) bp->stats_blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7668) u32 *temp_stats = (u32 *) bp->temp_stats_blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7669) u8 *stats_len_arr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7670)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7671) if (!hw_stats) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7672) memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7673) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7674) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7675)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7676) if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7677) (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7678) (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A2) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7679) (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7680) stats_len_arr = bnx2_5706_stats_len_arr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7681) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7682) stats_len_arr = bnx2_5708_stats_len_arr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7683)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7684) for (i = 0; i < BNX2_NUM_STATS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7685) unsigned long offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7686)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7687) if (stats_len_arr[i] == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7688) /* skip this counter */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7689) buf[i] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7690) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7691) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7692)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7693) offset = bnx2_stats_offset_arr[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7694) if (stats_len_arr[i] == 4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7695) /* 4-byte counter */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7696) buf[i] = (u64) *(hw_stats + offset) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7697) *(temp_stats + offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7698) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7699) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7700) /* 8-byte counter */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7701) buf[i] = (((u64) *(hw_stats + offset)) << 32) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7702) *(hw_stats + offset + 1) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7703) (((u64) *(temp_stats + offset)) << 32) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7704) *(temp_stats + offset + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7705) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7706) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7707)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7708) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7709) bnx2_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7710) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7711) struct bnx2 *bp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7712)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7713) switch (state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7714) case ETHTOOL_ID_ACTIVE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7715) bp->leds_save = BNX2_RD(bp, BNX2_MISC_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7716) BNX2_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7717) return 1; /* cycle on/off once per second */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7718)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7719) case ETHTOOL_ID_ON:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7720) BNX2_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7721) BNX2_EMAC_LED_1000MB_OVERRIDE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7722) BNX2_EMAC_LED_100MB_OVERRIDE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7723) BNX2_EMAC_LED_10MB_OVERRIDE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7724) BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7725) BNX2_EMAC_LED_TRAFFIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7726) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7727)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7728) case ETHTOOL_ID_OFF:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7729) BNX2_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7730) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7731)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7732) case ETHTOOL_ID_INACTIVE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7733) BNX2_WR(bp, BNX2_EMAC_LED, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7734) BNX2_WR(bp, BNX2_MISC_CFG, bp->leds_save);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7735) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7736) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7737)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7738) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7739) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7740)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7741) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7742) bnx2_set_features(struct net_device *dev, netdev_features_t features)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7743) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7744) struct bnx2 *bp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7745)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7746) /* TSO with VLAN tag won't work with current firmware */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7747) if (features & NETIF_F_HW_VLAN_CTAG_TX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7748) dev->vlan_features |= (dev->hw_features & NETIF_F_ALL_TSO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7749) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7750) dev->vlan_features &= ~NETIF_F_ALL_TSO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7751)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7752) if ((!!(features & NETIF_F_HW_VLAN_CTAG_RX) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7753) !!(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7754) netif_running(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7755) bnx2_netif_stop(bp, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7756) dev->features = features;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7757) bnx2_set_rx_mode(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7758) bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7759) bnx2_netif_start(bp, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7760) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7761) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7762)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7763) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7764) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7765)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7766) static void bnx2_get_channels(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7767) struct ethtool_channels *channels)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7768) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7769) struct bnx2 *bp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7770) u32 max_rx_rings = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7771) u32 max_tx_rings = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7772)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7773) if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !disable_msi) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7774) max_rx_rings = RX_MAX_RINGS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7775) max_tx_rings = TX_MAX_RINGS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7776) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7777)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7778) channels->max_rx = max_rx_rings;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7779) channels->max_tx = max_tx_rings;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7780) channels->max_other = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7781) channels->max_combined = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7782) channels->rx_count = bp->num_rx_rings;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7783) channels->tx_count = bp->num_tx_rings;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7784) channels->other_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7785) channels->combined_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7786) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7788) static int bnx2_set_channels(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7789) struct ethtool_channels *channels)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7790) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7791) struct bnx2 *bp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7792) u32 max_rx_rings = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7793) u32 max_tx_rings = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7794) int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7795)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7796) if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !disable_msi) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7797) max_rx_rings = RX_MAX_RINGS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7798) max_tx_rings = TX_MAX_RINGS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7799) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7800) if (channels->rx_count > max_rx_rings ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7801) channels->tx_count > max_tx_rings)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7802) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7803)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7804) bp->num_req_rx_rings = channels->rx_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7805) bp->num_req_tx_rings = channels->tx_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7806)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7807) if (netif_running(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7808) rc = bnx2_change_ring_size(bp, bp->rx_ring_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7809) bp->tx_ring_size, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7810)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7811) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7812) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7814) static const struct ethtool_ops bnx2_ethtool_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7815) .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7816) ETHTOOL_COALESCE_MAX_FRAMES |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7817) ETHTOOL_COALESCE_USECS_IRQ |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7818) ETHTOOL_COALESCE_MAX_FRAMES_IRQ |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7819) ETHTOOL_COALESCE_STATS_BLOCK_USECS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7820) .get_drvinfo = bnx2_get_drvinfo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7821) .get_regs_len = bnx2_get_regs_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7822) .get_regs = bnx2_get_regs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7823) .get_wol = bnx2_get_wol,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7824) .set_wol = bnx2_set_wol,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7825) .nway_reset = bnx2_nway_reset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7826) .get_link = bnx2_get_link,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7827) .get_eeprom_len = bnx2_get_eeprom_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7828) .get_eeprom = bnx2_get_eeprom,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7829) .set_eeprom = bnx2_set_eeprom,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7830) .get_coalesce = bnx2_get_coalesce,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7831) .set_coalesce = bnx2_set_coalesce,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7832) .get_ringparam = bnx2_get_ringparam,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7833) .set_ringparam = bnx2_set_ringparam,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7834) .get_pauseparam = bnx2_get_pauseparam,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7835) .set_pauseparam = bnx2_set_pauseparam,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7836) .self_test = bnx2_self_test,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7837) .get_strings = bnx2_get_strings,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7838) .set_phys_id = bnx2_set_phys_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7839) .get_ethtool_stats = bnx2_get_ethtool_stats,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7840) .get_sset_count = bnx2_get_sset_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7841) .get_channels = bnx2_get_channels,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7842) .set_channels = bnx2_set_channels,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7843) .get_link_ksettings = bnx2_get_link_ksettings,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7844) .set_link_ksettings = bnx2_set_link_ksettings,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7845) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7847) /* Called with rtnl_lock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7848) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7849) bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7850) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7851) struct mii_ioctl_data *data = if_mii(ifr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7852) struct bnx2 *bp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7853) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7855) switch(cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7856) case SIOCGMIIPHY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7857) data->phy_id = bp->phy_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7858)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7859) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7860) case SIOCGMIIREG: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7861) u32 mii_regval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7863) if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7864) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7866) if (!netif_running(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7867) return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7869) spin_lock_bh(&bp->phy_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7870) err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7871) spin_unlock_bh(&bp->phy_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7873) data->val_out = mii_regval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7875) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7876) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7878) case SIOCSMIIREG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7879) if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7880) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7881)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7882) if (!netif_running(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7883) return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7885) spin_lock_bh(&bp->phy_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7886) err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7887) spin_unlock_bh(&bp->phy_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7888)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7889) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7891) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7892) /* do nothing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7893) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7894) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7895) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7896) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7898) /* Called with rtnl_lock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7899) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7900) bnx2_change_mac_addr(struct net_device *dev, void *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7901) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7902) struct sockaddr *addr = p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7903) struct bnx2 *bp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7905) if (!is_valid_ether_addr(addr->sa_data))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7906) return -EADDRNOTAVAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7908) memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7909) if (netif_running(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7910) bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7912) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7913) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7915) /* Called with rtnl_lock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7916) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7917) bnx2_change_mtu(struct net_device *dev, int new_mtu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7918) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7919) struct bnx2 *bp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7921) dev->mtu = new_mtu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7922) return bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7923) false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7924) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7926) #ifdef CONFIG_NET_POLL_CONTROLLER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7927) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7928) poll_bnx2(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7929) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7930) struct bnx2 *bp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7931) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7933) for (i = 0; i < bp->irq_nvecs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7934) struct bnx2_irq *irq = &bp->irq_tbl[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7935)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7936) disable_irq(irq->vector);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7937) irq->handler(irq->vector, &bp->bnx2_napi[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7938) enable_irq(irq->vector);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7939) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7940) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7941) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7943) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7944) bnx2_get_5709_media(struct bnx2 *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7945) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7946) u32 val = BNX2_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7947) u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7948) u32 strap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7950) if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7951) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7952) else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7953) bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7954) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7955) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7956)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7957) if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7958) strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7959) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7960) strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7962) if (bp->func == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7963) switch (strap) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7964) case 0x4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7965) case 0x5:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7966) case 0x6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7967) bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7968) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7969) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7970) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7971) switch (strap) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7972) case 0x1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7973) case 0x2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7974) case 0x4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7975) bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7976) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7977) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7978) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7979) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7980)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7981) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7982) bnx2_get_pci_speed(struct bnx2 *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7983) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7984) u32 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7985)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7986) reg = BNX2_RD(bp, BNX2_PCICFG_MISC_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7987) if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7988) u32 clkreg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7990) bp->flags |= BNX2_FLAG_PCIX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7992) clkreg = BNX2_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7993)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7994) clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7995) switch (clkreg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7996) case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7997) bp->bus_speed_mhz = 133;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7998) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8000) case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8001) bp->bus_speed_mhz = 100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8002) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8004) case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8005) case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8006) bp->bus_speed_mhz = 66;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8007) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8009) case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8010) case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8011) bp->bus_speed_mhz = 50;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8012) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8013)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8014) case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8015) case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8016) case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8017) bp->bus_speed_mhz = 33;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8018) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8019) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8020) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8021) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8022) if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8023) bp->bus_speed_mhz = 66;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8024) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8025) bp->bus_speed_mhz = 33;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8026) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8028) if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8029) bp->flags |= BNX2_FLAG_PCI_32BIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8031) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8032)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8033) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8034) bnx2_read_vpd_fw_ver(struct bnx2 *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8035) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8036) int rc, i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8037) u8 *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8038) unsigned int block_end, rosize, len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8040) #define BNX2_VPD_NVRAM_OFFSET 0x300
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8041) #define BNX2_VPD_LEN 128
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8042) #define BNX2_MAX_VER_SLEN 30
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8044) data = kmalloc(256, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8045) if (!data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8046) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8047)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8048) rc = bnx2_nvram_read(bp, BNX2_VPD_NVRAM_OFFSET, data + BNX2_VPD_LEN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8049) BNX2_VPD_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8050) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8051) goto vpd_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8052)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8053) for (i = 0; i < BNX2_VPD_LEN; i += 4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8054) data[i] = data[i + BNX2_VPD_LEN + 3];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8055) data[i + 1] = data[i + BNX2_VPD_LEN + 2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8056) data[i + 2] = data[i + BNX2_VPD_LEN + 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8057) data[i + 3] = data[i + BNX2_VPD_LEN];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8058) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8060) i = pci_vpd_find_tag(data, 0, BNX2_VPD_LEN, PCI_VPD_LRDT_RO_DATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8061) if (i < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8062) goto vpd_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8063)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8064) rosize = pci_vpd_lrdt_size(&data[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8065) i += PCI_VPD_LRDT_TAG_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8066) block_end = i + rosize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8067)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8068) if (block_end > BNX2_VPD_LEN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8069) goto vpd_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8070)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8071) j = pci_vpd_find_info_keyword(data, i, rosize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8072) PCI_VPD_RO_KEYWORD_MFR_ID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8073) if (j < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8074) goto vpd_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8075)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8076) len = pci_vpd_info_field_size(&data[j]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8077)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8078) j += PCI_VPD_INFO_FLD_HDR_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8079) if (j + len > block_end || len != 4 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8080) memcmp(&data[j], "1028", 4))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8081) goto vpd_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8082)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8083) j = pci_vpd_find_info_keyword(data, i, rosize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8084) PCI_VPD_RO_KEYWORD_VENDOR0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8085) if (j < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8086) goto vpd_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8088) len = pci_vpd_info_field_size(&data[j]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8089)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8090) j += PCI_VPD_INFO_FLD_HDR_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8091) if (j + len > block_end || len > BNX2_MAX_VER_SLEN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8092) goto vpd_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8093)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8094) memcpy(bp->fw_version, &data[j], len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8095) bp->fw_version[len] = ' ';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8097) vpd_done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8098) kfree(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8099) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8101) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8102) bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8103) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8104) struct bnx2 *bp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8105) int rc, i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8106) u32 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8107) u64 dma_mask, persist_dma_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8108) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8110) SET_NETDEV_DEV(dev, &pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8111) bp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8113) bp->flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8114) bp->phy_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8116) bp->temp_stats_blk =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8117) kzalloc(sizeof(struct statistics_block), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8119) if (!bp->temp_stats_blk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8120) rc = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8121) goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8122) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8124) /* enable device (incl. PCI PM wakeup), and bus-mastering */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8125) rc = pci_enable_device(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8126) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8127) dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8128) goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8129) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8131) if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8132) dev_err(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8133) "Cannot find PCI device base address, aborting\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8134) rc = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8135) goto err_out_disable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8136) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8138) rc = pci_request_regions(pdev, DRV_MODULE_NAME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8139) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8140) dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8141) goto err_out_disable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8142) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8144) pci_set_master(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8146) bp->pm_cap = pdev->pm_cap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8147) if (bp->pm_cap == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8148) dev_err(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8149) "Cannot find power management capability, aborting\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8150) rc = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8151) goto err_out_release;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8152) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8153)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8154) bp->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8155) bp->pdev = pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8157) spin_lock_init(&bp->phy_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8158) spin_lock_init(&bp->indirect_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8159) #ifdef BCM_CNIC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8160) mutex_init(&bp->cnic_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8161) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8162) INIT_WORK(&bp->reset_task, bnx2_reset_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8164) bp->regview = pci_iomap(pdev, 0, MB_GET_CID_ADDR(TX_TSS_CID +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8165) TX_MAX_TSS_RINGS + 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8166) if (!bp->regview) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8167) dev_err(&pdev->dev, "Cannot map register space, aborting\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8168) rc = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8169) goto err_out_release;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8170) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8172) /* Configure byte swap and enable write to the reg_window registers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8173) * Rely on CPU to do target byte swapping on big endian systems
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8174) * The chip's target access swapping will not swap all accesses
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8175) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8176) BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8177) BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8178) BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8180) bp->chip_id = BNX2_RD(bp, BNX2_MISC_ID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8182) if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8183) if (!pci_is_pcie(pdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8184) dev_err(&pdev->dev, "Not PCIE, aborting\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8185) rc = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8186) goto err_out_unmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8187) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8188) bp->flags |= BNX2_FLAG_PCIE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8189) if (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8190) bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8192) /* AER (Advanced Error Reporting) hooks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8193) err = pci_enable_pcie_error_reporting(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8194) if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8195) bp->flags |= BNX2_FLAG_AER_ENABLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8197) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8198) bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8199) if (bp->pcix_cap == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8200) dev_err(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8201) "Cannot find PCIX capability, aborting\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8202) rc = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8203) goto err_out_unmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8204) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8205) bp->flags |= BNX2_FLAG_BROKEN_STATS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8206) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8208) if (BNX2_CHIP(bp) == BNX2_CHIP_5709 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8209) BNX2_CHIP_REV(bp) != BNX2_CHIP_REV_Ax) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8210) if (pdev->msix_cap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8211) bp->flags |= BNX2_FLAG_MSIX_CAP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8212) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8213)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8214) if (BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8215) BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8216) if (pdev->msi_cap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8217) bp->flags |= BNX2_FLAG_MSI_CAP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8218) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8220) /* 5708 cannot support DMA addresses > 40-bit. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8221) if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8222) persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8223) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8224) persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8225)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8226) /* Configure DMA attributes. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8227) if (pci_set_dma_mask(pdev, dma_mask) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8228) dev->features |= NETIF_F_HIGHDMA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8229) rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8230) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8231) dev_err(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8232) "pci_set_consistent_dma_mask failed, aborting\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8233) goto err_out_unmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8234) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8235) } else if ((rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8236) dev_err(&pdev->dev, "System does not support DMA, aborting\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8237) goto err_out_unmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8238) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8239)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8240) if (!(bp->flags & BNX2_FLAG_PCIE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8241) bnx2_get_pci_speed(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8243) /* 5706A0 may falsely detect SERR and PERR. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8244) if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8245) reg = BNX2_RD(bp, PCI_COMMAND);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8246) reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8247) BNX2_WR(bp, PCI_COMMAND, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8248) } else if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8249) !(bp->flags & BNX2_FLAG_PCIX)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8250) dev_err(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8251) "5706 A1 can only be used in a PCIX bus, aborting\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8252) rc = -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8253) goto err_out_unmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8254) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8256) bnx2_init_nvram(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8258) reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8259)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8260) if (bnx2_reg_rd_ind(bp, BNX2_MCP_TOE_ID) & BNX2_MCP_TOE_ID_FUNCTION_ID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8261) bp->func = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8263) if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8264) BNX2_SHM_HDR_SIGNATURE_SIG) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8265) u32 off = bp->func << 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8266)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8267) bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8268) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8269) bp->shmem_base = HOST_VIEW_SHMEM_BASE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8270)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8271) /* Get the permanent MAC address. First we need to make sure the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8272) * firmware is actually running.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8273) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8274) reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8275)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8276) if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8277) BNX2_DEV_INFO_SIGNATURE_MAGIC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8278) dev_err(&pdev->dev, "Firmware not running, aborting\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8279) rc = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8280) goto err_out_unmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8281) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8282)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8283) bnx2_read_vpd_fw_ver(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8284)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8285) j = strlen(bp->fw_version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8286) reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8287) for (i = 0; i < 3 && j < 24; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8288) u8 num, k, skip0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8290) if (i == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8291) bp->fw_version[j++] = 'b';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8292) bp->fw_version[j++] = 'c';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8293) bp->fw_version[j++] = ' ';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8294) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8295) num = (u8) (reg >> (24 - (i * 8)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8296) for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8297) if (num >= k || !skip0 || k == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8298) bp->fw_version[j++] = (num / k) + '0';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8299) skip0 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8300) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8301) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8302) if (i != 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8303) bp->fw_version[j++] = '.';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8304) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8305) reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8306) if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8307) bp->wol = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8309) if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8310) bp->flags |= BNX2_FLAG_ASF_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8312) for (i = 0; i < 30; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8313) reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8314) if (reg & BNX2_CONDITION_MFW_RUN_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8315) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8316) msleep(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8317) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8318) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8319) reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8320) reg &= BNX2_CONDITION_MFW_RUN_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8321) if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8322) reg != BNX2_CONDITION_MFW_RUN_NONE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8323) u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8324)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8325) if (j < 32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8326) bp->fw_version[j++] = ' ';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8327) for (i = 0; i < 3 && j < 28; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8328) reg = bnx2_reg_rd_ind(bp, addr + i * 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8329) reg = be32_to_cpu(reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8330) memcpy(&bp->fw_version[j], ®, 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8331) j += 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8332) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8333) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8335) reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8336) bp->mac_addr[0] = (u8) (reg >> 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8337) bp->mac_addr[1] = (u8) reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8338)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8339) reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8340) bp->mac_addr[2] = (u8) (reg >> 24);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8341) bp->mac_addr[3] = (u8) (reg >> 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8342) bp->mac_addr[4] = (u8) (reg >> 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8343) bp->mac_addr[5] = (u8) reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8344)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8345) bp->tx_ring_size = BNX2_MAX_TX_DESC_CNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8346) bnx2_set_rx_ring_size(bp, 255);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8347)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8348) bp->tx_quick_cons_trip_int = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8349) bp->tx_quick_cons_trip = 20;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8350) bp->tx_ticks_int = 18;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8351) bp->tx_ticks = 80;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8352)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8353) bp->rx_quick_cons_trip_int = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8354) bp->rx_quick_cons_trip = 12;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8355) bp->rx_ticks_int = 18;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8356) bp->rx_ticks = 18;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8357)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8358) bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8359)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8360) bp->current_interval = BNX2_TIMER_INTERVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8362) bp->phy_addr = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8363)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8364) /* allocate stats_blk */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8365) rc = bnx2_alloc_stats_blk(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8366) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8367) goto err_out_unmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8368)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8369) /* Disable WOL support if we are running on a SERDES chip. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8370) if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8371) bnx2_get_5709_media(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8372) else if (BNX2_CHIP_BOND(bp) & BNX2_CHIP_BOND_SERDES_BIT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8373) bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8374)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8375) bp->phy_port = PORT_TP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8376) if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8377) bp->phy_port = PORT_FIBRE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8378) reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8379) if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8380) bp->flags |= BNX2_FLAG_NO_WOL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8381) bp->wol = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8382) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8383) if (BNX2_CHIP(bp) == BNX2_CHIP_5706) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8384) /* Don't do parallel detect on this board because of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8385) * some board problems. The link will not go down
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8386) * if we do parallel detect.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8387) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8388) if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8389) pdev->subsystem_device == 0x310c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8390) bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8391) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8392) bp->phy_addr = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8393) if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8394) bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8395) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8396) } else if (BNX2_CHIP(bp) == BNX2_CHIP_5706 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8397) BNX2_CHIP(bp) == BNX2_CHIP_5708)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8398) bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8399) else if (BNX2_CHIP(bp) == BNX2_CHIP_5709 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8400) (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8401) BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Bx))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8402) bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8404) bnx2_init_fw_cap(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8405)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8406) if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8407) (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B0) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8408) (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B1) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8409) !(BNX2_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8410) bp->flags |= BNX2_FLAG_NO_WOL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8411) bp->wol = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8412) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8413)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8414) if (bp->flags & BNX2_FLAG_NO_WOL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8415) device_set_wakeup_capable(&bp->pdev->dev, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8416) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8417) device_set_wakeup_enable(&bp->pdev->dev, bp->wol);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8418)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8419) if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8420) bp->tx_quick_cons_trip_int =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8421) bp->tx_quick_cons_trip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8422) bp->tx_ticks_int = bp->tx_ticks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8423) bp->rx_quick_cons_trip_int =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8424) bp->rx_quick_cons_trip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8425) bp->rx_ticks_int = bp->rx_ticks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8426) bp->comp_prod_trip_int = bp->comp_prod_trip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8427) bp->com_ticks_int = bp->com_ticks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8428) bp->cmd_ticks_int = bp->cmd_ticks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8429) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8430)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8431) /* Disable MSI on 5706 if AMD 8132 bridge is found.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8432) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8433) * MSI is defined to be 32-bit write. The 5706 does 64-bit MSI writes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8434) * with byte enables disabled on the unused 32-bit word. This is legal
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8435) * but causes problems on the AMD 8132 which will eventually stop
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8436) * responding after a while.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8437) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8438) * AMD believes this incompatibility is unique to the 5706, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8439) * prefers to locally disable MSI rather than globally disabling it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8440) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8441) if (BNX2_CHIP(bp) == BNX2_CHIP_5706 && disable_msi == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8442) struct pci_dev *amd_8132 = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8443)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8444) while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8445) PCI_DEVICE_ID_AMD_8132_BRIDGE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8446) amd_8132))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8447)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8448) if (amd_8132->revision >= 0x10 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8449) amd_8132->revision <= 0x13) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8450) disable_msi = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8451) pci_dev_put(amd_8132);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8452) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8453) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8454) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8455) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8456)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8457) bnx2_set_default_link(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8458) bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8459)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8460) timer_setup(&bp->timer, bnx2_timer, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8461) bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8462)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8463) #ifdef BCM_CNIC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8464) if (bnx2_shmem_rd(bp, BNX2_ISCSI_INITIATOR) & BNX2_ISCSI_INITIATOR_EN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8465) bp->cnic_eth_dev.max_iscsi_conn =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8466) (bnx2_shmem_rd(bp, BNX2_ISCSI_MAX_CONN) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8467) BNX2_ISCSI_MAX_CONN_MASK) >> BNX2_ISCSI_MAX_CONN_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8468) bp->cnic_probe = bnx2_cnic_probe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8469) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8470) pci_save_state(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8471)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8472) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8473)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8474) err_out_unmap:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8475) if (bp->flags & BNX2_FLAG_AER_ENABLED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8476) pci_disable_pcie_error_reporting(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8477) bp->flags &= ~BNX2_FLAG_AER_ENABLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8478) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8479)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8480) pci_iounmap(pdev, bp->regview);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8481) bp->regview = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8482)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8483) err_out_release:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8484) pci_release_regions(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8485)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8486) err_out_disable:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8487) pci_disable_device(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8488)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8489) err_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8490) kfree(bp->temp_stats_blk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8491)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8492) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8493) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8494)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8495) static char *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8496) bnx2_bus_string(struct bnx2 *bp, char *str)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8497) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8498) char *s = str;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8499)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8500) if (bp->flags & BNX2_FLAG_PCIE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8501) s += sprintf(s, "PCI Express");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8502) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8503) s += sprintf(s, "PCI");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8504) if (bp->flags & BNX2_FLAG_PCIX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8505) s += sprintf(s, "-X");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8506) if (bp->flags & BNX2_FLAG_PCI_32BIT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8507) s += sprintf(s, " 32-bit");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8508) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8509) s += sprintf(s, " 64-bit");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8510) s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8511) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8512) return str;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8513) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8514)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8515) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8516) bnx2_del_napi(struct bnx2 *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8517) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8518) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8519)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8520) for (i = 0; i < bp->irq_nvecs; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8521) netif_napi_del(&bp->bnx2_napi[i].napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8522) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8523)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8524) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8525) bnx2_init_napi(struct bnx2 *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8526) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8527) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8528)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8529) for (i = 0; i < bp->irq_nvecs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8530) struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8531) int (*poll)(struct napi_struct *, int);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8532)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8533) if (i == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8534) poll = bnx2_poll;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8535) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8536) poll = bnx2_poll_msix;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8537)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8538) netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8539) bnapi->bp = bp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8540) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8541) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8542)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8543) static const struct net_device_ops bnx2_netdev_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8544) .ndo_open = bnx2_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8545) .ndo_start_xmit = bnx2_start_xmit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8546) .ndo_stop = bnx2_close,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8547) .ndo_get_stats64 = bnx2_get_stats64,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8548) .ndo_set_rx_mode = bnx2_set_rx_mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8549) .ndo_do_ioctl = bnx2_ioctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8550) .ndo_validate_addr = eth_validate_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8551) .ndo_set_mac_address = bnx2_change_mac_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8552) .ndo_change_mtu = bnx2_change_mtu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8553) .ndo_set_features = bnx2_set_features,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8554) .ndo_tx_timeout = bnx2_tx_timeout,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8555) #ifdef CONFIG_NET_POLL_CONTROLLER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8556) .ndo_poll_controller = poll_bnx2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8557) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8558) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8559)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8560) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8561) bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8562) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8563) struct net_device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8564) struct bnx2 *bp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8565) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8566) char str[40];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8567)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8568) /* dev zeroed in init_etherdev */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8569) dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8570) if (!dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8571) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8572)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8573) rc = bnx2_init_board(pdev, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8574) if (rc < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8575) goto err_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8576)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8577) dev->netdev_ops = &bnx2_netdev_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8578) dev->watchdog_timeo = TX_TIMEOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8579) dev->ethtool_ops = &bnx2_ethtool_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8580)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8581) bp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8582)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8583) pci_set_drvdata(pdev, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8584)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8585) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8586) * In-flight DMA from 1st kernel could continue going in kdump kernel.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8587) * New io-page table has been created before bnx2 does reset at open stage.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8588) * We have to wait for the in-flight DMA to complete to avoid it look up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8589) * into the newly created io-page table.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8590) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8591) if (is_kdump_kernel())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8592) bnx2_wait_dma_complete(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8593)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8594) memcpy(dev->dev_addr, bp->mac_addr, ETH_ALEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8595)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8596) dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8597) NETIF_F_TSO | NETIF_F_TSO_ECN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8598) NETIF_F_RXHASH | NETIF_F_RXCSUM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8599)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8600) if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8601) dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8602)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8603) dev->vlan_features = dev->hw_features;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8604) dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8605) dev->features |= dev->hw_features;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8606) dev->priv_flags |= IFF_UNICAST_FLT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8607) dev->min_mtu = MIN_ETHERNET_PACKET_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8608) dev->max_mtu = MAX_ETHERNET_JUMBO_PACKET_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8609)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8610) if (!(bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8611) dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8612)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8613) if ((rc = register_netdev(dev))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8614) dev_err(&pdev->dev, "Cannot register net device\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8615) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8616) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8617)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8618) netdev_info(dev, "%s (%c%d) %s found at mem %lx, IRQ %d, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8619) "node addr %pM\n", board_info[ent->driver_data].name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8620) ((BNX2_CHIP_ID(bp) & 0xf000) >> 12) + 'A',
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8621) ((BNX2_CHIP_ID(bp) & 0x0ff0) >> 4),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8622) bnx2_bus_string(bp, str), (long)pci_resource_start(pdev, 0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8623) pdev->irq, dev->dev_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8624)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8625) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8626)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8627) error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8628) pci_iounmap(pdev, bp->regview);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8629) pci_release_regions(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8630) pci_disable_device(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8631) err_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8632) bnx2_free_stats_blk(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8633) free_netdev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8634) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8635) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8636)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8637) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8638) bnx2_remove_one(struct pci_dev *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8639) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8640) struct net_device *dev = pci_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8641) struct bnx2 *bp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8642)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8643) unregister_netdev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8644)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8645) del_timer_sync(&bp->timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8646) cancel_work_sync(&bp->reset_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8647)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8648) pci_iounmap(bp->pdev, bp->regview);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8649)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8650) bnx2_free_stats_blk(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8651) kfree(bp->temp_stats_blk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8652)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8653) if (bp->flags & BNX2_FLAG_AER_ENABLED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8654) pci_disable_pcie_error_reporting(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8655) bp->flags &= ~BNX2_FLAG_AER_ENABLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8656) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8657)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8658) bnx2_release_firmware(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8659)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8660) free_netdev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8661)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8662) pci_release_regions(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8663) pci_disable_device(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8664) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8665)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8666) #ifdef CONFIG_PM_SLEEP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8667) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8668) bnx2_suspend(struct device *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8669) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8670) struct net_device *dev = dev_get_drvdata(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8671) struct bnx2 *bp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8672)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8673) if (netif_running(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8674) cancel_work_sync(&bp->reset_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8675) bnx2_netif_stop(bp, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8676) netif_device_detach(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8677) del_timer_sync(&bp->timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8678) bnx2_shutdown_chip(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8679) __bnx2_free_irq(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8680) bnx2_free_skbs(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8681) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8682) bnx2_setup_wol(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8683) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8684) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8685)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8686) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8687) bnx2_resume(struct device *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8688) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8689) struct net_device *dev = dev_get_drvdata(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8690) struct bnx2 *bp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8691)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8692) if (!netif_running(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8693) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8694)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8695) bnx2_set_power_state(bp, PCI_D0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8696) netif_device_attach(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8697) bnx2_request_irq(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8698) bnx2_init_nic(bp, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8699) bnx2_netif_start(bp, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8700) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8701) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8702)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8703) static SIMPLE_DEV_PM_OPS(bnx2_pm_ops, bnx2_suspend, bnx2_resume);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8704) #define BNX2_PM_OPS (&bnx2_pm_ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8705)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8706) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8707)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8708) #define BNX2_PM_OPS NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8709)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8710) #endif /* CONFIG_PM_SLEEP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8711) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8712) * bnx2_io_error_detected - called when PCI error is detected
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8713) * @pdev: Pointer to PCI device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8714) * @state: The current pci connection state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8715) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8716) * This function is called after a PCI bus error affecting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8717) * this device has been detected.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8718) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8719) static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8720) pci_channel_state_t state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8721) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8722) struct net_device *dev = pci_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8723) struct bnx2 *bp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8724)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8725) rtnl_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8726) netif_device_detach(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8727)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8728) if (state == pci_channel_io_perm_failure) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8729) rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8730) return PCI_ERS_RESULT_DISCONNECT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8731) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8732)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8733) if (netif_running(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8734) bnx2_netif_stop(bp, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8735) del_timer_sync(&bp->timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8736) bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8737) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8738)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8739) pci_disable_device(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8740) rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8741)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8742) /* Request a slot slot reset. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8743) return PCI_ERS_RESULT_NEED_RESET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8744) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8745)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8746) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8747) * bnx2_io_slot_reset - called after the pci bus has been reset.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8748) * @pdev: Pointer to PCI device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8749) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8750) * Restart the card from scratch, as if from a cold-boot.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8751) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8752) static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8753) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8754) struct net_device *dev = pci_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8755) struct bnx2 *bp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8756) pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8757) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8758)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8759) rtnl_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8760) if (pci_enable_device(pdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8761) dev_err(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8762) "Cannot re-enable PCI device after reset\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8763) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8764) pci_set_master(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8765) pci_restore_state(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8766) pci_save_state(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8767)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8768) if (netif_running(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8769) err = bnx2_init_nic(bp, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8770)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8771) if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8772) result = PCI_ERS_RESULT_RECOVERED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8773) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8774)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8775) if (result != PCI_ERS_RESULT_RECOVERED && netif_running(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8776) bnx2_napi_enable(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8777) dev_close(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8778) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8779) rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8780)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8781) if (!(bp->flags & BNX2_FLAG_AER_ENABLED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8782) return result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8783)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8784) return result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8785) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8786)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8787) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8788) * bnx2_io_resume - called when traffic can start flowing again.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8789) * @pdev: Pointer to PCI device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8790) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8791) * This callback is called when the error recovery driver tells us that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8792) * its OK to resume normal operation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8793) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8794) static void bnx2_io_resume(struct pci_dev *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8795) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8796) struct net_device *dev = pci_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8797) struct bnx2 *bp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8798)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8799) rtnl_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8800) if (netif_running(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8801) bnx2_netif_start(bp, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8802)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8803) netif_device_attach(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8804) rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8805) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8806)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8807) static void bnx2_shutdown(struct pci_dev *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8808) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8809) struct net_device *dev = pci_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8810) struct bnx2 *bp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8812) if (!dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8813) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8815) bp = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8816) if (!bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8817) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8819) rtnl_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8820) if (netif_running(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8821) dev_close(bp->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8823) if (system_state == SYSTEM_POWER_OFF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8824) bnx2_set_power_state(bp, PCI_D3hot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8825)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8826) rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8827) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8829) static const struct pci_error_handlers bnx2_err_handler = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8830) .error_detected = bnx2_io_error_detected,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8831) .slot_reset = bnx2_io_slot_reset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8832) .resume = bnx2_io_resume,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8833) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8835) static struct pci_driver bnx2_pci_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8836) .name = DRV_MODULE_NAME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8837) .id_table = bnx2_pci_tbl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8838) .probe = bnx2_init_one,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8839) .remove = bnx2_remove_one,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8840) .driver.pm = BNX2_PM_OPS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8841) .err_handler = &bnx2_err_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8842) .shutdown = bnx2_shutdown,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8843) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8844)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8845) module_pci_driver(bnx2_pci_driver);