// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2012-2013 Solarflare Communications Inc.
 */

#include "net_driver.h"
#include "rx_common.h"
#include "tx_common.h"
#include "ef10_regs.h"
#include "io.h"
#include "mcdi.h"
#include "mcdi_pcol.h"
#include "mcdi_port.h"
#include "mcdi_port_common.h"
#include "mcdi_functions.h"
#include "nic.h"
#include "mcdi_filters.h"
#include "workarounds.h"
#include "selftest.h"
#include "ef10_sriov.h"
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <net/udp_tunnel.h>

/* Hardware control for EF10 architecture including 'Huntington'. */

#define EFX_EF10_DRVGEN_EV 7
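/* Subtypes carried in the data field of driver-generated (DRVGEN)
 * events, distinguished by the EFX_EF10_DRVGEN_EV code above.
 */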
enum {
        EFX_EF10_TEST = 1,
        EFX_EF10_REFILL,
};

/* VLAN list entry */
struct efx_ef10_vlan {
        struct list_head list;
        u16 vid;
};

static int efx_ef10_set_udp_tnl_ports(struct efx_nic *efx, bool unloading);
static const struct udp_tunnel_nic_info efx_ef10_udp_tunnels;

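/* Read the MC warm boot count from the BIU soft status register.
 * The upper word must hold the 0xb007 ("boot") signature for the
 * count in the lower word to be valid; otherwise return -EIO.
 */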
static int efx_ef10_get_warm_boot_count(struct efx_nic *efx)
{
        efx_dword_t reg;

        efx_readd(efx, &reg, ER_DZ_BIU_MC_SFT_STATUS);
        return EFX_DWORD_FIELD(reg, EFX_WORD_1) == 0xb007 ?
                EFX_DWORD_FIELD(reg, EFX_WORD_0) : -EIO;
}

/* On all EF10s up to and including SFC9220 (Medford1), all PFs use BAR 0 for
 * I/O space and BAR 2(&3) for memory. On SFC9250 (Medford2), there is no I/O
 * bar; PFs use BAR 0/1 for memory.
 */
static unsigned int efx_ef10_pf_mem_bar(struct efx_nic *efx)
{
        switch (efx->pci_dev->device) {
        case 0x0b03: /* SFC9250 PF */
                return 0;
        default:
                return 2;
        }
}

/* All VFs use BAR 0/1 for memory */
static unsigned int efx_ef10_vf_mem_bar(struct efx_nic *efx)
{
        return 0;
}

static unsigned int efx_ef10_mem_map_size(struct efx_nic *efx)
{
        int bar;

        bar = efx->type->mem_bar(efx);
        return resource_size(&efx->pci_dev->resource[bar]);
}

static bool efx_ef10_is_vf(struct efx_nic *efx)
{
        return efx->type->is_vf;
}

#ifdef CONFIG_SFC_SRIOV
static int efx_ef10_get_vf_index(struct efx_nic *efx)
{
        MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN);
        struct efx_ef10_nic_data *nic_data = efx->nic_data;
        size_t outlen;
        int rc;

        rc = efx_mcdi_rpc(efx, MC_CMD_GET_FUNCTION_INFO, NULL, 0, outbuf,
                          sizeof(outbuf), &outlen);
        if (rc)
                return rc;
        if (outlen < sizeof(outbuf))
                return -EIO;

        nic_data->vf_index = MCDI_DWORD(outbuf, GET_FUNCTION_INFO_OUT_VF);
        return 0;
}
#endif

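/* Query the datapath firmware capabilities. Longer GET_CAPABILITIES
 * responses (V2/V3/V4) come from newer firmware and carry additional
 * fields; when a field is absent we fall back to a safe default.
 */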
static int efx_ef10_init_datapath_caps(struct efx_nic *efx)
{
        MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CAPABILITIES_V4_OUT_LEN);
        struct efx_ef10_nic_data *nic_data = efx->nic_data;
        size_t outlen;
        int rc;

        BUILD_BUG_ON(MC_CMD_GET_CAPABILITIES_IN_LEN != 0);

        rc = efx_mcdi_rpc(efx, MC_CMD_GET_CAPABILITIES, NULL, 0,
                          outbuf, sizeof(outbuf), &outlen);
        if (rc)
                return rc;
        if (outlen < MC_CMD_GET_CAPABILITIES_OUT_LEN) {
                netif_err(efx, drv, efx->net_dev,
                          "unable to read datapath firmware capabilities\n");
                return -EIO;
        }

        nic_data->datapath_caps =
                MCDI_DWORD(outbuf, GET_CAPABILITIES_OUT_FLAGS1);

        if (outlen >= MC_CMD_GET_CAPABILITIES_V2_OUT_LEN) {
                nic_data->datapath_caps2 = MCDI_DWORD(outbuf,
                                GET_CAPABILITIES_V2_OUT_FLAGS2);
                nic_data->piobuf_size = MCDI_WORD(outbuf,
                                GET_CAPABILITIES_V2_OUT_SIZE_PIO_BUFF);
        } else {
                nic_data->datapath_caps2 = 0;
                nic_data->piobuf_size = ER_DZ_TX_PIOBUF_SIZE;
        }

        /* Record the DPCPU firmware IDs to determine VEB vswitching support. */
        nic_data->rx_dpcpu_fw_id =
                MCDI_WORD(outbuf, GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID);
        nic_data->tx_dpcpu_fw_id =
                MCDI_WORD(outbuf, GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID);

        if (!(nic_data->datapath_caps &
              (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_LBN))) {
                netif_err(efx, probe, efx->net_dev,
                          "current firmware does not support an RX prefix\n");
                return -ENODEV;
        }

        if (outlen >= MC_CMD_GET_CAPABILITIES_V3_OUT_LEN) {
                u8 vi_window_mode = MCDI_BYTE(outbuf,
                                GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE);

                rc = efx_mcdi_window_mode_to_stride(efx, vi_window_mode);
                if (rc)
                        return rc;
        } else {
                /* keep default VI stride */
                netif_dbg(efx, probe, efx->net_dev,
                          "firmware did not report VI window mode, assuming vi_stride = %u\n",
                          efx->vi_stride);
        }

        if (outlen >= MC_CMD_GET_CAPABILITIES_V4_OUT_LEN) {
                efx->num_mac_stats = MCDI_WORD(outbuf,
                                GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS);
                netif_dbg(efx, probe, efx->net_dev,
                          "firmware reports num_mac_stats = %u\n",
                          efx->num_mac_stats);
        } else {
                /* leave num_mac_stats as the default value, MC_CMD_MAC_NSTATS */
                netif_dbg(efx, probe, efx->net_dev,
                          "firmware did not report num_mac_stats, assuming %u\n",
                          efx->num_mac_stats);
        }

        return 0;
}

static void efx_ef10_read_licensed_features(struct efx_nic *efx)
{
        MCDI_DECLARE_BUF(inbuf, MC_CMD_LICENSING_V3_IN_LEN);
        MCDI_DECLARE_BUF(outbuf, MC_CMD_LICENSING_V3_OUT_LEN);
        struct efx_ef10_nic_data *nic_data = efx->nic_data;
        size_t outlen;
        int rc;

        MCDI_SET_DWORD(inbuf, LICENSING_V3_IN_OP,
                       MC_CMD_LICENSING_V3_IN_OP_REPORT_LICENSE);
        rc = efx_mcdi_rpc_quiet(efx, MC_CMD_LICENSING_V3, inbuf, sizeof(inbuf),
                                outbuf, sizeof(outbuf), &outlen);
        if (rc || (outlen < MC_CMD_LICENSING_V3_OUT_LEN))
                return;

        nic_data->licensed_features = MCDI_QWORD(outbuf,
                        LICENSING_V3_OUT_LICENSED_FEATURES);
}

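/* Return the MC system clock frequency, or a negative error code.
 * The timer-config fallback below divides 1536000 by this value to
 * get nanoseconds, which assumes the frequency is reported in MHz.
 */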
static int efx_ef10_get_sysclk_freq(struct efx_nic *efx)
{
        MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CLOCK_OUT_LEN);
        int rc;

        rc = efx_mcdi_rpc(efx, MC_CMD_GET_CLOCK, NULL, 0,
                          outbuf, sizeof(outbuf), NULL);
        if (rc)
                return rc;
        rc = MCDI_DWORD(outbuf, GET_CLOCK_OUT_SYS_FREQ);
        return rc > 0 ? rc : -ERANGE;
}

static int efx_ef10_get_timer_workarounds(struct efx_nic *efx)
{
        struct efx_ef10_nic_data *nic_data = efx->nic_data;
        unsigned int implemented;
        unsigned int enabled;
        int rc;

        nic_data->workaround_35388 = false;
        nic_data->workaround_61265 = false;

        rc = efx_mcdi_get_workarounds(efx, &implemented, &enabled);

        if (rc == -ENOSYS) {
                /* Firmware without GET_WORKAROUNDS - not a problem. */
                rc = 0;
        } else if (rc == 0) {
                /* Bug61265 workaround is always enabled if implemented. */
                if (enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG61265)
                        nic_data->workaround_61265 = true;

                if (enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG35388) {
                        nic_data->workaround_35388 = true;
                } else if (implemented & MC_CMD_GET_WORKAROUNDS_OUT_BUG35388) {
                        /* Workaround is implemented but not enabled.
                         * Try to enable it.
                         */
                        rc = efx_mcdi_set_workaround(efx,
                                                     MC_CMD_WORKAROUND_BUG35388,
                                                     true, NULL);
                        if (rc == 0)
                                nic_data->workaround_35388 = true;
                        /* If we failed to set the workaround just carry on. */
                        rc = 0;
                }
        }

        netif_dbg(efx, probe, efx->net_dev,
                  "workaround for bug 35388 is %sabled\n",
                  nic_data->workaround_35388 ? "en" : "dis");
        netif_dbg(efx, probe, efx->net_dev,
                  "workaround for bug 61265 is %sabled\n",
                  nic_data->workaround_61265 ? "en" : "dis");

        return rc;
}

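/* Translate GET_EVQ_TMR_PROPERTIES output into timer_quantum_ns and
 * timer_max_ns, choosing the fields that match whichever timer
 * workaround (bug 61265 or bug 35388) is active, if any.
 */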
static void efx_ef10_process_timer_config(struct efx_nic *efx,
                                          const efx_dword_t *data)
{
        unsigned int max_count;

        if (EFX_EF10_WORKAROUND_61265(efx)) {
                efx->timer_quantum_ns = MCDI_DWORD(data,
                        GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_STEP_NS);
                efx->timer_max_ns = MCDI_DWORD(data,
                        GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_MAX_NS);
        } else if (EFX_EF10_WORKAROUND_35388(efx)) {
                efx->timer_quantum_ns = MCDI_DWORD(data,
                        GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_NS_PER_COUNT);
                max_count = MCDI_DWORD(data,
                        GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_MAX_COUNT);
                efx->timer_max_ns = max_count * efx->timer_quantum_ns;
        } else {
                efx->timer_quantum_ns = MCDI_DWORD(data,
                        GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_NS_PER_COUNT);
                max_count = MCDI_DWORD(data,
                        GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_MAX_COUNT);
                efx->timer_max_ns = max_count * efx->timer_quantum_ns;
        }

        netif_dbg(efx, probe, efx->net_dev,
                  "got timer properties from MC: quantum %u ns; max %u ns\n",
                  efx->timer_quantum_ns, efx->timer_max_ns);
}

static int efx_ef10_get_timer_config(struct efx_nic *efx)
{
        MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_LEN);
        int rc;

        rc = efx_ef10_get_timer_workarounds(efx);
        if (rc)
                return rc;

        rc = efx_mcdi_rpc_quiet(efx, MC_CMD_GET_EVQ_TMR_PROPERTIES, NULL, 0,
                                outbuf, sizeof(outbuf), NULL);

        if (rc == 0) {
                efx_ef10_process_timer_config(efx, outbuf);
        } else if (rc == -ENOSYS || rc == -EPERM) {
                /* Not available - fall back to Huntington defaults. */
                unsigned int quantum;

                rc = efx_ef10_get_sysclk_freq(efx);
                if (rc < 0)
                        return rc;

                quantum = 1536000 / rc; /* 1536 sysclk cycles; in ns if rc is in MHz */
                efx->timer_quantum_ns = quantum;
                efx->timer_max_ns = efx->type->timer_period_max * quantum;
                rc = 0;
        } else {
                efx_mcdi_display_error(efx, MC_CMD_GET_EVQ_TMR_PROPERTIES,
                                       MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_LEN,
                                       NULL, 0, rc);
        }

        return rc;
}

static int efx_ef10_get_mac_address_pf(struct efx_nic *efx, u8 *mac_address)
{
        MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_MAC_ADDRESSES_OUT_LEN);
        size_t outlen;
        int rc;

        BUILD_BUG_ON(MC_CMD_GET_MAC_ADDRESSES_IN_LEN != 0);

        rc = efx_mcdi_rpc(efx, MC_CMD_GET_MAC_ADDRESSES, NULL, 0,
                          outbuf, sizeof(outbuf), &outlen);
        if (rc)
                return rc;
        if (outlen < MC_CMD_GET_MAC_ADDRESSES_OUT_LEN)
                return -EIO;

        ether_addr_copy(mac_address,
                        MCDI_PTR(outbuf, GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE));
        return 0;
}

static int efx_ef10_get_mac_address_vf(struct efx_nic *efx, u8 *mac_address)
{
        MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN);
        MCDI_DECLARE_BUF(outbuf, MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX);
        size_t outlen;
        int num_addrs, rc;

        MCDI_SET_DWORD(inbuf, VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID,
                       EVB_PORT_ID_ASSIGNED);
        rc = efx_mcdi_rpc(efx, MC_CMD_VPORT_GET_MAC_ADDRESSES, inbuf,
                          sizeof(inbuf), outbuf, sizeof(outbuf), &outlen);

        if (rc)
                return rc;
        if (outlen < MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMIN)
                return -EIO;

        num_addrs = MCDI_DWORD(outbuf,
                               VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT);

        WARN_ON(num_addrs != 1);

        ether_addr_copy(mac_address,
                        MCDI_PTR(outbuf, VPORT_GET_MAC_ADDRESSES_OUT_MACADDR));

        return 0;
}

static ssize_t efx_ef10_show_link_control_flag(struct device *dev,
                                               struct device_attribute *attr,
                                               char *buf)
{
        struct efx_nic *efx = dev_get_drvdata(dev);

        return sprintf(buf, "%d\n",
                       ((efx->mcdi->fn_flags) &
                        (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL))
                       ? 1 : 0);
}

static ssize_t efx_ef10_show_primary_flag(struct device *dev,
                                          struct device_attribute *attr,
                                          char *buf)
{
        struct efx_nic *efx = dev_get_drvdata(dev);

        return sprintf(buf, "%d\n",
                       ((efx->mcdi->fn_flags) &
                        (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY))
                       ? 1 : 0);
}

static struct efx_ef10_vlan *efx_ef10_find_vlan(struct efx_nic *efx, u16 vid)
{
        struct efx_ef10_nic_data *nic_data = efx->nic_data;
        struct efx_ef10_vlan *vlan;

        WARN_ON(!mutex_is_locked(&nic_data->vlan_lock));

        list_for_each_entry(vlan, &nic_data->vlan_list, list) {
                if (vlan->vid == vid)
                        return vlan;
        }

        return NULL;
}

static int efx_ef10_add_vlan(struct efx_nic *efx, u16 vid)
{
        struct efx_ef10_nic_data *nic_data = efx->nic_data;
        struct efx_ef10_vlan *vlan;
        int rc;

        mutex_lock(&nic_data->vlan_lock);

        vlan = efx_ef10_find_vlan(efx, vid);
        if (vlan) {
                /* We add VID 0 on init. 8021q adds it on module init
                 * for all interfaces with the VLAN filtering feature.
                 */
                if (vid == 0)
                        goto done_unlock;
                netif_warn(efx, drv, efx->net_dev,
                           "VLAN %u already added\n", vid);
                rc = -EALREADY;
                goto fail_exist;
        }

        rc = -ENOMEM;
        vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
        if (!vlan)
                goto fail_alloc;

        vlan->vid = vid;

        list_add_tail(&vlan->list, &nic_data->vlan_list);

        if (efx->filter_state) {
                mutex_lock(&efx->mac_lock);
                down_write(&efx->filter_sem);
                rc = efx_mcdi_filter_add_vlan(efx, vlan->vid);
                up_write(&efx->filter_sem);
                mutex_unlock(&efx->mac_lock);
                if (rc)
                        goto fail_filter_add_vlan;
        }

done_unlock:
        mutex_unlock(&nic_data->vlan_lock);
        return 0;

fail_filter_add_vlan:
        list_del(&vlan->list);
        kfree(vlan);
fail_alloc:
fail_exist:
        mutex_unlock(&nic_data->vlan_lock);
        return rc;
}

static void efx_ef10_del_vlan_internal(struct efx_nic *efx,
                                       struct efx_ef10_vlan *vlan)
{
        struct efx_ef10_nic_data *nic_data = efx->nic_data;

        WARN_ON(!mutex_is_locked(&nic_data->vlan_lock));

        if (efx->filter_state) {
                down_write(&efx->filter_sem);
                efx_mcdi_filter_del_vlan(efx, vlan->vid);
                up_write(&efx->filter_sem);
        }

        list_del(&vlan->list);
        kfree(vlan);
}

static int efx_ef10_del_vlan(struct efx_nic *efx, u16 vid)
{
        struct efx_ef10_nic_data *nic_data = efx->nic_data;
        struct efx_ef10_vlan *vlan;
        int rc = 0;

        /* 8021q removes VID 0 on module unload for all interfaces
         * with the VLAN filtering feature. We need to keep it to
         * receive untagged traffic.
         */
        if (vid == 0)
                return 0;

        mutex_lock(&nic_data->vlan_lock);

        vlan = efx_ef10_find_vlan(efx, vid);
        if (!vlan) {
                netif_err(efx, drv, efx->net_dev,
                          "VLAN %u to be deleted not found\n", vid);
                rc = -ENOENT;
        } else {
                efx_ef10_del_vlan_internal(efx, vlan);
        }

        mutex_unlock(&nic_data->vlan_lock);

        return rc;
}

static void efx_ef10_cleanup_vlans(struct efx_nic *efx)
{
        struct efx_ef10_nic_data *nic_data = efx->nic_data;
        struct efx_ef10_vlan *vlan, *next_vlan;

        mutex_lock(&nic_data->vlan_lock);
        list_for_each_entry_safe(vlan, next_vlan, &nic_data->vlan_list, list)
                efx_ef10_del_vlan_internal(efx, vlan);
        mutex_unlock(&nic_data->vlan_lock);
}

static DEVICE_ATTR(link_control_flag, 0444, efx_ef10_show_link_control_flag,
                   NULL);
static DEVICE_ATTR(primary_flag, 0444, efx_ef10_show_primary_flag, NULL);

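/* Main probe path for an EF10 function: bring up MCDI, query
 * capabilities, MAC address and timer configuration, then set up
 * VLAN and UDP-tunnel bookkeeping. Errors unwind through the fail
 * labels in reverse order of setup.
 */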
static int efx_ef10_probe(struct efx_nic *efx)
{
        struct efx_ef10_nic_data *nic_data;
        int i, rc;

        nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
        if (!nic_data)
                return -ENOMEM;
        efx->nic_data = nic_data;

        /* we assume later that we can copy from this buffer in dwords */
        BUILD_BUG_ON(MCDI_CTL_SDU_LEN_MAX_V2 % 4);

        rc = efx_nic_alloc_buffer(efx, &nic_data->mcdi_buf,
                                  8 + MCDI_CTL_SDU_LEN_MAX_V2, GFP_KERNEL);
        if (rc)
                goto fail1;

        /* Get the MC's warm boot count. In case it's rebooting right
         * now, be prepared to retry.
         */
        i = 0;
        for (;;) {
                rc = efx_ef10_get_warm_boot_count(efx);
                if (rc >= 0)
                        break;
                if (++i == 5)
                        goto fail2;
                ssleep(1);
        }
        nic_data->warm_boot_count = rc;

        /* In case we're recovering from a crash (kexec), we want to
         * cancel any outstanding request by the previous user of this
         * function. We send a special message using the least
         * significant bits of the 'high' (doorbell) register.
         */
        _efx_writed(efx, cpu_to_le32(1), ER_DZ_MC_DB_HWRD);

        rc = efx_mcdi_init(efx);
        if (rc)
                goto fail2;

        mutex_init(&nic_data->udp_tunnels_lock);
        for (i = 0; i < ARRAY_SIZE(nic_data->udp_tunnels); ++i)
                nic_data->udp_tunnels[i].type =
                        TUNNEL_ENCAP_UDP_PORT_ENTRY_INVALID;

        /* Reset (most) configuration for this function */
        rc = efx_mcdi_reset(efx, RESET_TYPE_ALL);
        if (rc)
                goto fail3;

        /* Enable event logging */
        rc = efx_mcdi_log_ctrl(efx, true, false, 0);
        if (rc)
                goto fail3;

        rc = device_create_file(&efx->pci_dev->dev,
                                &dev_attr_link_control_flag);
        if (rc)
                goto fail3;

        rc = device_create_file(&efx->pci_dev->dev, &dev_attr_primary_flag);
        if (rc)
                goto fail4;

        rc = efx_get_pf_index(efx, &nic_data->pf_index);
        if (rc)
                goto fail5;

        rc = efx_ef10_init_datapath_caps(efx);
        if (rc < 0)
                goto fail5;

        efx_ef10_read_licensed_features(efx);

        /* We can have one VI for each vi_stride-byte region.
         * However, until we use TX option descriptors we need up to four
         * TX queues per channel for different checksumming combinations.
         */
        if (nic_data->datapath_caps &
            (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN))
                efx->tx_queues_per_channel = 4;
        else
                efx->tx_queues_per_channel = 2;
        efx->max_vis = efx_ef10_mem_map_size(efx) / efx->vi_stride;
        if (!efx->max_vis) {
                netif_err(efx, drv, efx->net_dev, "error determining max VIs\n");
                rc = -EIO;
                goto fail5;
        }
        efx->max_channels = min_t(unsigned int, EFX_MAX_CHANNELS,
                                  efx->max_vis / efx->tx_queues_per_channel);
        efx->max_tx_channels = efx->max_channels;
        if (WARN_ON(efx->max_channels == 0)) {
                rc = -EIO;
                goto fail5;
        }

        efx->rx_packet_len_offset =
                ES_DZ_RX_PREFIX_PKTLEN_OFST - ES_DZ_RX_PREFIX_SIZE;

        if (nic_data->datapath_caps &
            (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_INCLUDE_FCS_LBN))
                efx->net_dev->hw_features |= NETIF_F_RXFCS;

        rc = efx_mcdi_port_get_number(efx);
        if (rc < 0)
                goto fail5;
        efx->port_num = rc;

        rc = efx->type->get_mac_address(efx, efx->net_dev->perm_addr);
        if (rc)
                goto fail5;

        rc = efx_ef10_get_timer_config(efx);
        if (rc < 0)
                goto fail5;

        rc = efx_mcdi_mon_probe(efx);
        if (rc && rc != -EPERM)
                goto fail5;

        efx_ptp_defer_probe_with_channel(efx);

#ifdef CONFIG_SFC_SRIOV
        if ((efx->pci_dev->physfn) && (!efx->pci_dev->is_physfn)) {
                struct pci_dev *pci_dev_pf = efx->pci_dev->physfn;
                struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf);

                efx_pf->type->get_mac_address(efx_pf, nic_data->port_id);
        } else
#endif
                ether_addr_copy(nic_data->port_id, efx->net_dev->perm_addr);

        INIT_LIST_HEAD(&nic_data->vlan_list);
        mutex_init(&nic_data->vlan_lock);

        /* Add unspecified VID to support VLAN filtering being disabled */
        rc = efx_ef10_add_vlan(efx, EFX_FILTER_VID_UNSPEC);
        if (rc)
                goto fail_add_vid_unspec;

        /* If VLAN filtering is enabled, we need VID 0 to get untagged
         * traffic. It is added automatically if the 8021q module is
         * loaded, but we can't rely on that since the module may not
         * be loaded.
         */
        rc = efx_ef10_add_vlan(efx, 0);
        if (rc)
                goto fail_add_vid_0;

        if (nic_data->datapath_caps &
            (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN) &&
            efx->mcdi->fn_flags &
            (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED))
                efx->net_dev->udp_tunnel_nic_info = &efx_ef10_udp_tunnels;

        return 0;

fail_add_vid_0:
        efx_ef10_cleanup_vlans(efx);
fail_add_vid_unspec:
        mutex_destroy(&nic_data->vlan_lock);
        efx_ptp_remove(efx);
        efx_mcdi_mon_remove(efx);
fail5:
        device_remove_file(&efx->pci_dev->dev, &dev_attr_primary_flag);
fail4:
        device_remove_file(&efx->pci_dev->dev, &dev_attr_link_control_flag);
fail3:
        efx_mcdi_detach(efx);

        mutex_lock(&nic_data->udp_tunnels_lock);
        memset(nic_data->udp_tunnels, 0, sizeof(nic_data->udp_tunnels));
        (void)efx_ef10_set_udp_tnl_ports(efx, true);
        mutex_unlock(&nic_data->udp_tunnels_lock);
        mutex_destroy(&nic_data->udp_tunnels_lock);

        efx_mcdi_fini(efx);
fail2:
        efx_nic_free_buffer(efx, &nic_data->mcdi_buf);
fail1:
        kfree(nic_data);
        efx->nic_data = NULL;
        return rc;
}

#ifdef EFX_USE_PIO

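/* PIO (programmed I/O) buffers allow small packets to be written
 * directly through a write-combining BAR mapping instead of being
 * DMAed; the helpers below allocate, link and free these buffers
 * via MCDI.
 */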
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) static void efx_ef10_free_piobufs(struct efx_nic *efx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) struct efx_ef10_nic_data *nic_data = efx->nic_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) MCDI_DECLARE_BUF(inbuf, MC_CMD_FREE_PIOBUF_IN_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) BUILD_BUG_ON(MC_CMD_FREE_PIOBUF_OUT_LEN != 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) for (i = 0; i < nic_data->n_piobufs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) MCDI_SET_DWORD(inbuf, FREE_PIOBUF_IN_PIOBUF_HANDLE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) nic_data->piobuf_handle[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) rc = efx_mcdi_rpc(efx, MC_CMD_FREE_PIOBUF, inbuf, sizeof(inbuf),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) NULL, 0, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) WARN_ON(rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) nic_data->n_piobufs = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) struct efx_ef10_nic_data *nic_data = efx->nic_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_PIOBUF_OUT_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) size_t outlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) BUILD_BUG_ON(MC_CMD_ALLOC_PIOBUF_IN_LEN != 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) for (i = 0; i < n; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) rc = efx_mcdi_rpc_quiet(efx, MC_CMD_ALLOC_PIOBUF, NULL, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) outbuf, sizeof(outbuf), &outlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) /* Don't display the MC error if we didn't have space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) * for a VF.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) if (!(efx_ef10_is_vf(efx) && rc == -ENOSPC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) efx_mcdi_display_error(efx, MC_CMD_ALLOC_PIOBUF,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) 0, outbuf, outlen, rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) if (outlen < MC_CMD_ALLOC_PIOBUF_OUT_LEN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) rc = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) nic_data->piobuf_handle[i] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) MCDI_DWORD(outbuf, ALLOC_PIOBUF_OUT_PIOBUF_HANDLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) netif_dbg(efx, probe, efx->net_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) "allocated PIO buffer %u handle %x\n", i,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) nic_data->piobuf_handle[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) nic_data->n_piobufs = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) efx_ef10_free_piobufs(efx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) }

static int efx_ef10_link_piobufs(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	MCDI_DECLARE_BUF(inbuf, MC_CMD_LINK_PIOBUF_IN_LEN);
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;
	unsigned int offset, index;
	int rc;

	BUILD_BUG_ON(MC_CMD_LINK_PIOBUF_OUT_LEN != 0);
	BUILD_BUG_ON(MC_CMD_UNLINK_PIOBUF_OUT_LEN != 0);

	/* Link a buffer to each VI in the write-combining mapping */
	for (index = 0; index < nic_data->n_piobufs; ++index) {
		MCDI_SET_DWORD(inbuf, LINK_PIOBUF_IN_PIOBUF_HANDLE,
			       nic_data->piobuf_handle[index]);
		MCDI_SET_DWORD(inbuf, LINK_PIOBUF_IN_TXQ_INSTANCE,
			       nic_data->pio_write_vi_base + index);
		rc = efx_mcdi_rpc(efx, MC_CMD_LINK_PIOBUF,
				  inbuf, MC_CMD_LINK_PIOBUF_IN_LEN,
				  NULL, 0, NULL);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "failed to link VI %u to PIO buffer %u (%d)\n",
				  nic_data->pio_write_vi_base + index, index,
				  rc);
			goto fail;
		}
		netif_dbg(efx, probe, efx->net_dev,
			  "linked VI %u to PIO buffer %u\n",
			  nic_data->pio_write_vi_base + index, index);
	}

	/* Link a buffer to each TX queue */
	efx_for_each_channel(channel, efx) {
		/* Extra channels, even those with TXQs (PTP), do not require
		 * PIO resources.
		 */
		if (!channel->type->want_pio ||
		    channel->channel >= efx->xdp_channel_offset)
			continue;

		efx_for_each_channel_tx_queue(tx_queue, channel) {
			/* We assign the PIO buffers to queues in
			 * reverse order to allow for the following
			 * special case.
			 */
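			/* Worked example (hypothetical sizes): with
			 * piobuf_size = 2048 and efx_piobuf_size = 256,
			 * eight queues share each PIO buffer.  Reverse
			 * assignment puts the highest-numbered TX queue
			 * at offset 0 of buffer 0, so the queue whose VI
			 * may be shared with the start of the WC mapping
			 * (the special case below) always lands in
			 * buffer 0.
			 */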
			offset = ((efx->tx_channel_offset + efx->n_tx_channels -
				   tx_queue->channel->channel - 1) *
				  efx_piobuf_size);
			index = offset / nic_data->piobuf_size;
			offset = offset % nic_data->piobuf_size;

			/* When the host page size is 4K, the first
			 * host page in the WC mapping may be within
			 * the same VI page as the last TX queue. We
			 * can only link one buffer to each VI.
			 */
			if (tx_queue->queue == nic_data->pio_write_vi_base) {
				BUG_ON(index != 0);
				rc = 0;
			} else {
				MCDI_SET_DWORD(inbuf,
					       LINK_PIOBUF_IN_PIOBUF_HANDLE,
					       nic_data->piobuf_handle[index]);
				MCDI_SET_DWORD(inbuf,
					       LINK_PIOBUF_IN_TXQ_INSTANCE,
					       tx_queue->queue);
				rc = efx_mcdi_rpc(efx, MC_CMD_LINK_PIOBUF,
						  inbuf, MC_CMD_LINK_PIOBUF_IN_LEN,
						  NULL, 0, NULL);
			}

			if (rc) {
				/* This is non-fatal; the TX path just
				 * won't use PIO for this queue
				 */
				netif_err(efx, drv, efx->net_dev,
					  "failed to link VI %u to PIO buffer %u (%d)\n",
					  tx_queue->queue, index, rc);
				tx_queue->piobuf = NULL;
			} else {
				tx_queue->piobuf =
					nic_data->pio_write_base +
					index * efx->vi_stride + offset;
				tx_queue->piobuf_offset = offset;
				netif_dbg(efx, probe, efx->net_dev,
					  "linked VI %u to PIO buffer %u offset %x addr %p\n",
					  tx_queue->queue, index,
					  tx_queue->piobuf_offset,
					  tx_queue->piobuf);
			}
		}
	}

	return 0;

fail:
	/* inbuf was defined for MC_CMD_LINK_PIOBUF. We can use the same
	 * buffer for MC_CMD_UNLINK_PIOBUF because it's shorter.
	 */
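	/* Only failures in the write-VI loop above jump here; TX-queue
	 * link failures are non-fatal and never reach this path, so
	 * unwinding the write-VI links is sufficient.
	 */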
	BUILD_BUG_ON(MC_CMD_LINK_PIOBUF_IN_LEN < MC_CMD_UNLINK_PIOBUF_IN_LEN);
	while (index--) {
		MCDI_SET_DWORD(inbuf, UNLINK_PIOBUF_IN_TXQ_INSTANCE,
			       nic_data->pio_write_vi_base + index);
		efx_mcdi_rpc(efx, MC_CMD_UNLINK_PIOBUF,
			     inbuf, MC_CMD_UNLINK_PIOBUF_IN_LEN,
			     NULL, 0, NULL);
	}
	return rc;
}

static void efx_ef10_forget_old_piobufs(struct efx_nic *efx)
{
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;

	/* All our existing PIO buffers went away */
	efx_for_each_channel(channel, efx)
		efx_for_each_channel_tx_queue(tx_queue, channel)
			tx_queue->piobuf = NULL;
}

#else /* !EFX_USE_PIO */

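/* Stubs for builds without EFX_USE_PIO, so callers can use the PIO
 * helpers unconditionally: requesting a non-zero number of buffers
 * fails with -ENOBUFS and the teardown helpers are no-ops.
 */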
static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n)
{
	return n == 0 ? 0 : -ENOBUFS;
}

static int efx_ef10_link_piobufs(struct efx_nic *efx)
{
	return 0;
}

static void efx_ef10_free_piobufs(struct efx_nic *efx)
{
}

static void efx_ef10_forget_old_piobufs(struct efx_nic *efx)
{
}

#endif /* EFX_USE_PIO */

static void efx_ef10_remove(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	int rc;

#ifdef CONFIG_SFC_SRIOV
	struct efx_ef10_nic_data *nic_data_pf;
	struct pci_dev *pci_dev_pf;
	struct efx_nic *efx_pf;
	struct ef10_vf *vf;

	if (efx->pci_dev->is_virtfn) {
		pci_dev_pf = efx->pci_dev->physfn;
		if (pci_dev_pf) {
			efx_pf = pci_get_drvdata(pci_dev_pf);
			nic_data_pf = efx_pf->nic_data;
			vf = nic_data_pf->vf + nic_data->vf_index;
			vf->efx = NULL;
		} else {
			netif_info(efx, drv, efx->net_dev,
				   "Could not get the PF id from VF\n");
		}
	}
#endif

	efx_ef10_cleanup_vlans(efx);
	mutex_destroy(&nic_data->vlan_lock);

	efx_ptp_remove(efx);

	efx_mcdi_mon_remove(efx);

	efx_mcdi_rx_free_indir_table(efx);

	if (nic_data->wc_membase)
		iounmap(nic_data->wc_membase);

	rc = efx_mcdi_free_vis(efx);
	WARN_ON(rc != 0);

	if (!nic_data->must_restore_piobufs)
		efx_ef10_free_piobufs(efx);

	device_remove_file(&efx->pci_dev->dev, &dev_attr_primary_flag);
	device_remove_file(&efx->pci_dev->dev, &dev_attr_link_control_flag);

	efx_mcdi_detach(efx);

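	/* Clear the local UDP tunnel table before the final push below so
	 * that the MC, told via 'unloading' that we are going away, is
	 * presumably left with no tunnel ports configured.
	 */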
	memset(nic_data->udp_tunnels, 0, sizeof(nic_data->udp_tunnels));
	mutex_lock(&nic_data->udp_tunnels_lock);
	(void)efx_ef10_set_udp_tnl_ports(efx, true);
	mutex_unlock(&nic_data->udp_tunnels_lock);

	mutex_destroy(&nic_data->udp_tunnels_lock);

	efx_mcdi_fini(efx);
	efx_nic_free_buffer(efx, &nic_data->mcdi_buf);
	kfree(nic_data);
}

static int efx_ef10_probe_pf(struct efx_nic *efx)
{
	return efx_ef10_probe(efx);
}

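/* Query port/vadaptor properties.  Callers may pass NULL for any output
 * they do not need.  If the firmware lacks VADAPTOR_QUERY support the
 * MCDI request is skipped and the zero-initialised response buffer is
 * read back, so every requested output reports zero.
 */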
int efx_ef10_vadaptor_query(struct efx_nic *efx, unsigned int port_id,
			    u32 *port_flags, u32 *vadaptor_flags,
			    unsigned int *vlan_tags)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_QUERY_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_VADAPTOR_QUERY_OUT_LEN);
	size_t outlen;
	int rc;

	if (nic_data->datapath_caps &
	    (1 << MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_QUERY_LBN)) {
		MCDI_SET_DWORD(inbuf, VADAPTOR_QUERY_IN_UPSTREAM_PORT_ID,
			       port_id);

		rc = efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_QUERY, inbuf, sizeof(inbuf),
				  outbuf, sizeof(outbuf), &outlen);
		if (rc)
			return rc;

		if (outlen < sizeof(outbuf))
			return -EIO;
	}

	if (port_flags)
		*port_flags = MCDI_DWORD(outbuf, VADAPTOR_QUERY_OUT_PORT_FLAGS);
	if (vadaptor_flags)
		*vadaptor_flags =
			MCDI_DWORD(outbuf, VADAPTOR_QUERY_OUT_VADAPTOR_FLAGS);
	if (vlan_tags)
		*vlan_tags =
			MCDI_DWORD(outbuf,
				   VADAPTOR_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS);

	return 0;
}

int efx_ef10_vadaptor_alloc(struct efx_nic *efx, unsigned int port_id)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_ALLOC_IN_LEN);

	MCDI_SET_DWORD(inbuf, VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID, port_id);
	return efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_ALLOC, inbuf, sizeof(inbuf),
			    NULL, 0, NULL);
}

int efx_ef10_vadaptor_free(struct efx_nic *efx, unsigned int port_id)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_FREE_IN_LEN);

	MCDI_SET_DWORD(inbuf, VADAPTOR_FREE_IN_UPSTREAM_PORT_ID, port_id);
	return efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_FREE, inbuf, sizeof(inbuf),
			    NULL, 0, NULL);
}

int efx_ef10_vport_add_mac(struct efx_nic *efx,
			   unsigned int port_id, u8 *mac)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_LEN);

	MCDI_SET_DWORD(inbuf, VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID, port_id);
	ether_addr_copy(MCDI_PTR(inbuf, VPORT_ADD_MAC_ADDRESS_IN_MACADDR), mac);

	return efx_mcdi_rpc(efx, MC_CMD_VPORT_ADD_MAC_ADDRESS, inbuf,
			    sizeof(inbuf), NULL, 0, NULL);
}

int efx_ef10_vport_del_mac(struct efx_nic *efx,
			   unsigned int port_id, u8 *mac)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_LEN);

	MCDI_SET_DWORD(inbuf, VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID, port_id);
	ether_addr_copy(MCDI_PTR(inbuf, VPORT_DEL_MAC_ADDRESS_IN_MACADDR), mac);

	return efx_mcdi_rpc(efx, MC_CMD_VPORT_DEL_MAC_ADDRESS, inbuf,
			    sizeof(inbuf), NULL, 0, NULL);
}

#ifdef CONFIG_SFC_SRIOV
static int efx_ef10_probe_vf(struct efx_nic *efx)
{
	int rc;
	struct pci_dev *pci_dev_pf;

	/* If the parent PF has no VF data structure, it doesn't know about this
	 * VF so fail probe. The VF needs to be re-created. This can happen
	 * if the PF driver is unloaded while the VF is assigned to a guest.
	 */
	pci_dev_pf = efx->pci_dev->physfn;
	if (pci_dev_pf) {
		struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf);
		struct efx_ef10_nic_data *nic_data_pf = efx_pf->nic_data;

		if (!nic_data_pf->vf) {
			netif_info(efx, drv, efx->net_dev,
				   "The VF cannot link to its parent PF; "
				   "please destroy and re-create the VF\n");
			return -EBUSY;
		}
	}

	rc = efx_ef10_probe(efx);
	if (rc)
		return rc;

	rc = efx_ef10_get_vf_index(efx);
	if (rc)
		goto fail;

	if (efx->pci_dev->is_virtfn) {
		if (efx->pci_dev->physfn) {
			struct efx_nic *efx_pf =
				pci_get_drvdata(efx->pci_dev->physfn);
			struct efx_ef10_nic_data *nic_data_p = efx_pf->nic_data;
			struct efx_ef10_nic_data *nic_data = efx->nic_data;

			nic_data_p->vf[nic_data->vf_index].efx = efx;
			nic_data_p->vf[nic_data->vf_index].pci_dev =
				efx->pci_dev;
		} else {
			netif_info(efx, drv, efx->net_dev,
				   "Could not get the PF id from VF\n");
		}
	}

	return 0;

fail:
	efx_ef10_remove(efx);
	return rc;
}
#else
static int efx_ef10_probe_vf(struct efx_nic *efx __attribute__ ((unused)))
{
	return 0;
}
#endif

static int efx_ef10_alloc_vis(struct efx_nic *efx,
			      unsigned int min_vis, unsigned int max_vis)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;

	return efx_mcdi_alloc_vis(efx, min_vis, max_vis, &nic_data->vi_base,
				  &nic_data->n_allocated_vis);
}

/* Note that the failure path of this function does not free
 * resources, as this will be done by efx_ef10_remove().
 */
static int efx_ef10_dimension_resources(struct efx_nic *efx)
{
	unsigned int min_vis = max_t(unsigned int, efx->tx_queues_per_channel,
				     efx_separate_tx_channels ? 2 : 1);
	unsigned int channel_vis, pio_write_vi_base, max_vis;
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	unsigned int uc_mem_map_size, wc_mem_map_size;
	void __iomem *membase;
	int rc;

	channel_vis = max(efx->n_channels,
			  ((efx->n_tx_channels + efx->n_extra_tx_channels) *
			   efx->tx_queues_per_channel) +
			  efx->n_xdp_channels * efx->xdp_tx_per_channel);
	if (efx->max_vis && efx->max_vis < channel_vis) {
		netif_dbg(efx, drv, efx->net_dev,
			  "Reducing channel VIs from %u to %u\n",
			  channel_vis, efx->max_vis);
		channel_vis = efx->max_vis;
	}

#ifdef EFX_USE_PIO
	/* Try to allocate PIO buffers if wanted and if the full
	 * number of PIO buffers would be sufficient to allocate one
	 * copy-buffer per TX channel. Failure is non-fatal, as there
	 * are only a small number of PIO buffers shared between all
	 * functions of the controller.
	 */
	if (efx_piobuf_size != 0 &&
	    nic_data->piobuf_size / efx_piobuf_size * EF10_TX_PIOBUF_COUNT >=
	    efx->n_tx_channels) {
		unsigned int n_piobufs =
			DIV_ROUND_UP(efx->n_tx_channels,
				     nic_data->piobuf_size / efx_piobuf_size);
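		/* For example (hypothetical sizes): with piobuf_size =
		 * 2048, efx_piobuf_size = 256 and 12 TX channels, each
		 * buffer holds 2048 / 256 = 8 copy-buffers and
		 * n_piobufs = DIV_ROUND_UP(12, 8) = 2.
		 */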

		rc = efx_ef10_alloc_piobufs(efx, n_piobufs);
		if (rc == -ENOSPC)
			netif_dbg(efx, probe, efx->net_dev,
				  "out of PIO buffers; cannot allocate more\n");
		else if (rc == -EPERM)
			netif_dbg(efx, probe, efx->net_dev,
				  "not permitted to allocate PIO buffers\n");
		else if (rc)
			netif_err(efx, probe, efx->net_dev,
				  "failed to allocate PIO buffers (%d)\n", rc);
		else
			netif_dbg(efx, probe, efx->net_dev,
				  "allocated %u PIO buffers\n", n_piobufs);
	}
#else
	nic_data->n_piobufs = 0;
#endif

	/* PIO buffers should be mapped with write-combining enabled,
	 * and we want to make single UC and WC mappings rather than
	 * several of each (in fact that's the only option if host
	 * page size is >4K). So we may allocate some extra VIs just
	 * for writing PIO buffers through.
	 *
	 * The UC mapping contains (channel_vis - 1) complete VIs and the
	 * first 4K of the next VI. Then the WC mapping begins with
	 * the remainder of this last VI.
	 */
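	/* Worked example (hypothetical sizes): vi_stride = 8192, 4K pages,
	 * channel_vis = 4 and two PIO buffers, taking ER_DZ_TX_PIOBUF as
	 * the 4K aperture offset described above.  Then:
	 *   uc_mem_map_size   = PAGE_ALIGN(3 * 8192 + 4096) = 28672
	 *   pio_write_vi_base = 28672 / 8192 = 3
	 *   wc_mem_map_size   = PAGE_ALIGN(5 * 8192) - 28672 = 12288
	 *   max_vis           = 3 + 2 = 5
	 * Here the UC mapping ends halfway through VI 3, so VI 3 is both
	 * the last channel VI and the first PIO write VI, the 4K-page
	 * special case handled in efx_ef10_link_piobufs().
	 */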
	uc_mem_map_size = PAGE_ALIGN((channel_vis - 1) * efx->vi_stride +
				     ER_DZ_TX_PIOBUF);
	if (nic_data->n_piobufs) {
		/* pio_write_vi_base rounds down to give the number of complete
		 * VIs inside the UC mapping.
		 */
		pio_write_vi_base = uc_mem_map_size / efx->vi_stride;
		wc_mem_map_size = (PAGE_ALIGN((pio_write_vi_base +
					       nic_data->n_piobufs) *
					      efx->vi_stride) -
				   uc_mem_map_size);
		max_vis = pio_write_vi_base + nic_data->n_piobufs;
	} else {
		pio_write_vi_base = 0;
		wc_mem_map_size = 0;
		max_vis = channel_vis;
	}

	/* In case the last attached driver failed to free VIs, do it now */
	rc = efx_mcdi_free_vis(efx);
	if (rc != 0)
		return rc;

	rc = efx_ef10_alloc_vis(efx, min_vis, max_vis);
	if (rc != 0)
		return rc;

	if (nic_data->n_allocated_vis < channel_vis) {
		netif_info(efx, drv, efx->net_dev,
			   "Could not allocate enough VIs to satisfy RSS"
			   " requirements. Performance may not be optimal.\n");
		/* We didn't get the VIs to populate our channels.
		 * We could keep what we got but then we'd have more
		 * interrupts than we need.
		 * Instead calculate new max_channels and restart.
		 */
		efx->max_channels = nic_data->n_allocated_vis;
		efx->max_tx_channels =
			nic_data->n_allocated_vis / efx->tx_queues_per_channel;

		efx_mcdi_free_vis(efx);
		return -EAGAIN;
	}

	/* If we didn't get enough VIs to map all the PIO buffers, free the
	 * PIO buffers
	 */
	if (nic_data->n_piobufs &&
	    nic_data->n_allocated_vis <
	    pio_write_vi_base + nic_data->n_piobufs) {
		netif_dbg(efx, probe, efx->net_dev,
			  "%u VIs are not sufficient to map %u PIO buffers\n",
			  nic_data->n_allocated_vis, nic_data->n_piobufs);
		efx_ef10_free_piobufs(efx);
	}

	/* Shrink the original UC mapping of the memory BAR */
	membase = ioremap(efx->membase_phys, uc_mem_map_size);
	if (!membase) {
		netif_err(efx, probe, efx->net_dev,
			  "could not shrink memory BAR to %x\n",
			  uc_mem_map_size);
		return -ENOMEM;
	}
	iounmap(efx->membase);
	efx->membase = membase;

	/* Set up the WC mapping if needed */
	if (wc_mem_map_size) {
		nic_data->wc_membase = ioremap_wc(efx->membase_phys +
						  uc_mem_map_size,
						  wc_mem_map_size);
		if (!nic_data->wc_membase) {
			netif_err(efx, probe, efx->net_dev,
				  "could not allocate WC mapping of size %x\n",
				  wc_mem_map_size);
			return -ENOMEM;
		}
		nic_data->pio_write_vi_base = pio_write_vi_base;
		nic_data->pio_write_base =
			nic_data->wc_membase +
			(pio_write_vi_base * efx->vi_stride + ER_DZ_TX_PIOBUF -
			 uc_mem_map_size);

		rc = efx_ef10_link_piobufs(efx);
		if (rc)
			efx_ef10_free_piobufs(efx);
	}

	netif_dbg(efx, probe, efx->net_dev,
		  "memory BAR at %pa (virtual %p+%x UC, %p+%x WC)\n",
		  &efx->membase_phys, efx->membase, uc_mem_map_size,
		  nic_data->wc_membase, wc_mem_map_size);

	return 0;
}

static void efx_ef10_fini_nic(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;

	kfree(nic_data->mc_stats);
	nic_data->mc_stats = NULL;
}

static int efx_ef10_init_nic(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	netdev_features_t hw_enc_features = 0;
	int rc;

	if (nic_data->must_check_datapath_caps) {
		rc = efx_ef10_init_datapath_caps(efx);
		if (rc)
			return rc;
		nic_data->must_check_datapath_caps = false;
	}

	if (efx->must_realloc_vis) {
		/* We cannot let the number of VIs change now */
		rc = efx_ef10_alloc_vis(efx, nic_data->n_allocated_vis,
					nic_data->n_allocated_vis);
		if (rc)
			return rc;
		efx->must_realloc_vis = false;
	}

	nic_data->mc_stats = kmalloc_array(efx->num_mac_stats, sizeof(__le64),
					   GFP_KERNEL);
	if (!nic_data->mc_stats)
		return -ENOMEM;

	if (nic_data->must_restore_piobufs && nic_data->n_piobufs) {
		rc = efx_ef10_alloc_piobufs(efx, nic_data->n_piobufs);
		if (rc == 0) {
			rc = efx_ef10_link_piobufs(efx);
			if (rc)
				efx_ef10_free_piobufs(efx);
		}

		/* Log an error on failure, but this is non-fatal.
		 * Permission errors are less important - we've presumably
		 * had the PIO buffer licence removed.
		 */
		if (rc == -EPERM)
			netif_dbg(efx, drv, efx->net_dev,
				  "not permitted to restore PIO buffers\n");
		else if (rc)
			netif_err(efx, drv, efx->net_dev,
				  "failed to restore PIO buffers (%d)\n", rc);
		nic_data->must_restore_piobufs = false;
	}

	/* add encapsulated checksum offload features */
	if (efx_has_cap(efx, VXLAN_NVGRE) && !efx_ef10_is_vf(efx))
		hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	/* add encapsulated TSO features */
	if (efx_has_cap(efx, TX_TSO_V2_ENCAP)) {
		netdev_features_t encap_tso_features;

		encap_tso_features = NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
			NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM;

		hw_enc_features |= encap_tso_features | NETIF_F_TSO;
		efx->net_dev->features |= encap_tso_features;
	}
	efx->net_dev->hw_enc_features = hw_enc_features;

	/* don't fail init if RSS setup doesn't work */
	rc = efx->type->rx_push_rss_config(efx, false,
					   efx->rss_context.rx_indir_table, NULL);

	return 0;
}

static void efx_ef10_table_reset_mc_allocations(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
#ifdef CONFIG_SFC_SRIOV
	unsigned int i;
#endif

	/* All our allocations have been reset */
	efx->must_realloc_vis = true;
	efx_mcdi_filter_table_reset_mc_allocations(efx);
	nic_data->must_restore_piobufs = true;
	efx_ef10_forget_old_piobufs(efx);
	efx->rss_context.context_id = EFX_MCDI_RSS_CONTEXT_INVALID;

	/* Driver-created vswitches and vports must be re-created */
	nic_data->must_probe_vswitching = true;
	efx->vport_id = EVB_PORT_ID_ASSIGNED;
#ifdef CONFIG_SFC_SRIOV
	if (nic_data->vf)
		for (i = 0; i < efx->vf_count; i++)
			nic_data->vf[i].vport_id = 0;
#endif
}

static enum reset_type efx_ef10_map_reset_reason(enum reset_type reason)
{
	if (reason == RESET_TYPE_MC_FAILURE)
		return RESET_TYPE_DATAPATH;

	return efx_mcdi_map_reset_reason(reason);
}

static int efx_ef10_map_reset_flags(u32 *flags)
{
	enum {
		EF10_RESET_PORT = ((ETH_RESET_MAC | ETH_RESET_PHY) <<
				   ETH_RESET_SHARED_SHIFT),
		EF10_RESET_MC = ((ETH_RESET_DMA | ETH_RESET_FILTER |
				  ETH_RESET_OFFLOAD | ETH_RESET_MAC |
				  ETH_RESET_PHY | ETH_RESET_MGMT) <<
				 ETH_RESET_SHARED_SHIFT)
	};
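	/* Both masks sit in the 'shared' half of the ethtool reset word.
	 * EF10_RESET_PORT (MAC + PHY) is a subset of EF10_RESET_MC, which
	 * also covers DMA, filters, offload and management, i.e. a full
	 * MC reboot; the stronger reset is therefore checked first below.
	 */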

	/* We assume for now that our PCI function is permitted to
	 * reset everything.
	 */

	if ((*flags & EF10_RESET_MC) == EF10_RESET_MC) {
		*flags &= ~EF10_RESET_MC;
		return RESET_TYPE_WORLD;
	}

	if ((*flags & EF10_RESET_PORT) == EF10_RESET_PORT) {
		*flags &= ~EF10_RESET_PORT;
		return RESET_TYPE_ALL;
	}

	/* no invisible reset implemented */

	return -EINVAL;
}

static int efx_ef10_reset(struct efx_nic *efx, enum reset_type reset_type)
{
	int rc = efx_mcdi_reset(efx, reset_type);

	/* Unprivileged functions return -EPERM, but need to return success
	 * here so that the datapath is brought back up.
	 */
	if (reset_type == RESET_TYPE_WORLD && rc == -EPERM)
		rc = 0;

	/* If it was a port reset, trigger reallocation of MC resources.
	 * Note that on an MC reset nothing needs to be done now because we'll
	 * detect the MC reset later and handle it then.
	 * For an FLR, we never get an MC reset event, but the MC has reset all
	 * resources assigned to us, so we have to trigger reallocation now.
	 */
	if ((reset_type == RESET_TYPE_ALL ||
	     reset_type == RESET_TYPE_MCDI_TIMEOUT) && !rc)
		efx_ef10_table_reset_mc_allocations(efx);
	return rc;
}

#define EF10_DMA_STAT(ext_name, mcdi_name)			\
	[EF10_STAT_ ## ext_name] =				\
	{ #ext_name, 64, 8 * MC_CMD_MAC_ ## mcdi_name }
#define EF10_DMA_INVIS_STAT(int_name, mcdi_name)		\
	[EF10_STAT_ ## int_name] =				\
	{ NULL, 64, 8 * MC_CMD_MAC_ ## mcdi_name }
#define EF10_OTHER_STAT(ext_name)				\
	[EF10_STAT_ ## ext_name] = { #ext_name, 0, 0 }
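
/* For example, EF10_DMA_STAT(port_tx_bytes, TX_BYTES) expands to
 *	[EF10_STAT_port_tx_bytes] =
 *	{ "port_tx_bytes", 64, 8 * MC_CMD_MAC_TX_BYTES },
 * i.e. a 64-bit counter whose byte offset in the DMA buffer is the MCDI
 * statistic index scaled by 8 (the counters are DMA'd as 64-bit words).
 * EF10_OTHER_STAT entries use width 0: they are not DMA'd by hardware
 * but derived by the driver (e.g. port_rx_good_bytes).
 */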

static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = {
	EF10_DMA_STAT(port_tx_bytes, TX_BYTES),
	EF10_DMA_STAT(port_tx_packets, TX_PKTS),
	EF10_DMA_STAT(port_tx_pause, TX_PAUSE_PKTS),
	EF10_DMA_STAT(port_tx_control, TX_CONTROL_PKTS),
	EF10_DMA_STAT(port_tx_unicast, TX_UNICAST_PKTS),
	EF10_DMA_STAT(port_tx_multicast, TX_MULTICAST_PKTS),
	EF10_DMA_STAT(port_tx_broadcast, TX_BROADCAST_PKTS),
	EF10_DMA_STAT(port_tx_lt64, TX_LT64_PKTS),
	EF10_DMA_STAT(port_tx_64, TX_64_PKTS),
	EF10_DMA_STAT(port_tx_65_to_127, TX_65_TO_127_PKTS),
	EF10_DMA_STAT(port_tx_128_to_255, TX_128_TO_255_PKTS),
	EF10_DMA_STAT(port_tx_256_to_511, TX_256_TO_511_PKTS),
	EF10_DMA_STAT(port_tx_512_to_1023, TX_512_TO_1023_PKTS),
	EF10_DMA_STAT(port_tx_1024_to_15xx, TX_1024_TO_15XX_PKTS),
	EF10_DMA_STAT(port_tx_15xx_to_jumbo, TX_15XX_TO_JUMBO_PKTS),
	EF10_DMA_STAT(port_rx_bytes, RX_BYTES),
	EF10_DMA_INVIS_STAT(port_rx_bytes_minus_good_bytes, RX_BAD_BYTES),
	EF10_OTHER_STAT(port_rx_good_bytes),
	EF10_OTHER_STAT(port_rx_bad_bytes),
	EF10_DMA_STAT(port_rx_packets, RX_PKTS),
	EF10_DMA_STAT(port_rx_good, RX_GOOD_PKTS),
	EF10_DMA_STAT(port_rx_bad, RX_BAD_FCS_PKTS),
	EF10_DMA_STAT(port_rx_pause, RX_PAUSE_PKTS),
	EF10_DMA_STAT(port_rx_control, RX_CONTROL_PKTS),
	EF10_DMA_STAT(port_rx_unicast, RX_UNICAST_PKTS),
	EF10_DMA_STAT(port_rx_multicast, RX_MULTICAST_PKTS),
	EF10_DMA_STAT(port_rx_broadcast, RX_BROADCAST_PKTS),
	EF10_DMA_STAT(port_rx_lt64, RX_UNDERSIZE_PKTS),
	EF10_DMA_STAT(port_rx_64, RX_64_PKTS),
	EF10_DMA_STAT(port_rx_65_to_127, RX_65_TO_127_PKTS),
	EF10_DMA_STAT(port_rx_128_to_255, RX_128_TO_255_PKTS),
	EF10_DMA_STAT(port_rx_256_to_511, RX_256_TO_511_PKTS),
	EF10_DMA_STAT(port_rx_512_to_1023, RX_512_TO_1023_PKTS),
	EF10_DMA_STAT(port_rx_1024_to_15xx, RX_1024_TO_15XX_PKTS),
	EF10_DMA_STAT(port_rx_15xx_to_jumbo, RX_15XX_TO_JUMBO_PKTS),
	EF10_DMA_STAT(port_rx_gtjumbo, RX_GTJUMBO_PKTS),
	EF10_DMA_STAT(port_rx_bad_gtjumbo, RX_JABBER_PKTS),
	EF10_DMA_STAT(port_rx_overflow, RX_OVERFLOW_PKTS),
	EF10_DMA_STAT(port_rx_align_error, RX_ALIGN_ERROR_PKTS),
	EF10_DMA_STAT(port_rx_length_error, RX_LENGTH_ERROR_PKTS),
	EF10_DMA_STAT(port_rx_nodesc_drops, RX_NODESC_DROPS),
	EFX_GENERIC_SW_STAT(rx_nodesc_trunc),
	EFX_GENERIC_SW_STAT(rx_noskb_drops),
	EF10_DMA_STAT(port_rx_pm_trunc_bb_overflow, PM_TRUNC_BB_OVERFLOW),
	EF10_DMA_STAT(port_rx_pm_discard_bb_overflow, PM_DISCARD_BB_OVERFLOW),
	EF10_DMA_STAT(port_rx_pm_trunc_vfifo_full, PM_TRUNC_VFIFO_FULL),
	EF10_DMA_STAT(port_rx_pm_discard_vfifo_full, PM_DISCARD_VFIFO_FULL),
	EF10_DMA_STAT(port_rx_pm_trunc_qbb, PM_TRUNC_QBB),
	EF10_DMA_STAT(port_rx_pm_discard_qbb, PM_DISCARD_QBB),
	EF10_DMA_STAT(port_rx_pm_discard_mapping, PM_DISCARD_MAPPING),
	EF10_DMA_STAT(port_rx_dp_q_disabled_packets, RXDP_Q_DISABLED_PKTS),
	EF10_DMA_STAT(port_rx_dp_di_dropped_packets, RXDP_DI_DROPPED_PKTS),
	EF10_DMA_STAT(port_rx_dp_streaming_packets, RXDP_STREAMING_PKTS),
	EF10_DMA_STAT(port_rx_dp_hlb_fetch, RXDP_HLB_FETCH_CONDITIONS),
	EF10_DMA_STAT(port_rx_dp_hlb_wait, RXDP_HLB_WAIT_CONDITIONS),
	EF10_DMA_STAT(rx_unicast, VADAPTER_RX_UNICAST_PACKETS),
	EF10_DMA_STAT(rx_unicast_bytes, VADAPTER_RX_UNICAST_BYTES),
	EF10_DMA_STAT(rx_multicast, VADAPTER_RX_MULTICAST_PACKETS),
	EF10_DMA_STAT(rx_multicast_bytes, VADAPTER_RX_MULTICAST_BYTES),
	EF10_DMA_STAT(rx_broadcast, VADAPTER_RX_BROADCAST_PACKETS),
	EF10_DMA_STAT(rx_broadcast_bytes, VADAPTER_RX_BROADCAST_BYTES),
	EF10_DMA_STAT(rx_bad, VADAPTER_RX_BAD_PACKETS),
	EF10_DMA_STAT(rx_bad_bytes, VADAPTER_RX_BAD_BYTES),
	EF10_DMA_STAT(rx_overflow, VADAPTER_RX_OVERFLOW),
	EF10_DMA_STAT(tx_unicast, VADAPTER_TX_UNICAST_PACKETS),
	EF10_DMA_STAT(tx_unicast_bytes, VADAPTER_TX_UNICAST_BYTES),
	EF10_DMA_STAT(tx_multicast, VADAPTER_TX_MULTICAST_PACKETS),
	EF10_DMA_STAT(tx_multicast_bytes, VADAPTER_TX_MULTICAST_BYTES),
	EF10_DMA_STAT(tx_broadcast, VADAPTER_TX_BROADCAST_PACKETS),
	EF10_DMA_STAT(tx_broadcast_bytes, VADAPTER_TX_BROADCAST_BYTES),
	EF10_DMA_STAT(tx_bad, VADAPTER_TX_BAD_PACKETS),
	EF10_DMA_STAT(tx_bad_bytes, VADAPTER_TX_BAD_BYTES),
	EF10_DMA_STAT(tx_overflow, VADAPTER_TX_OVERFLOW),
	EF10_DMA_STAT(fec_uncorrected_errors, FEC_UNCORRECTED_ERRORS),
	EF10_DMA_STAT(fec_corrected_errors, FEC_CORRECTED_ERRORS),
	EF10_DMA_STAT(fec_corrected_symbols_lane0, FEC_CORRECTED_SYMBOLS_LANE0),
	EF10_DMA_STAT(fec_corrected_symbols_lane1, FEC_CORRECTED_SYMBOLS_LANE1),
	EF10_DMA_STAT(fec_corrected_symbols_lane2, FEC_CORRECTED_SYMBOLS_LANE2),
	EF10_DMA_STAT(fec_corrected_symbols_lane3, FEC_CORRECTED_SYMBOLS_LANE3),
	EF10_DMA_STAT(ctpio_vi_busy_fallback, CTPIO_VI_BUSY_FALLBACK),
	EF10_DMA_STAT(ctpio_long_write_success, CTPIO_LONG_WRITE_SUCCESS),
	EF10_DMA_STAT(ctpio_missing_dbell_fail, CTPIO_MISSING_DBELL_FAIL),
	EF10_DMA_STAT(ctpio_overflow_fail, CTPIO_OVERFLOW_FAIL),
	EF10_DMA_STAT(ctpio_underflow_fail, CTPIO_UNDERFLOW_FAIL),
	EF10_DMA_STAT(ctpio_timeout_fail, CTPIO_TIMEOUT_FAIL),
	EF10_DMA_STAT(ctpio_noncontig_wr_fail, CTPIO_NONCONTIG_WR_FAIL),
	EF10_DMA_STAT(ctpio_frm_clobber_fail, CTPIO_FRM_CLOBBER_FAIL),
	EF10_DMA_STAT(ctpio_invalid_wr_fail, CTPIO_INVALID_WR_FAIL),
	EF10_DMA_STAT(ctpio_vi_clobber_fallback, CTPIO_VI_CLOBBER_FALLBACK),
	EF10_DMA_STAT(ctpio_unqualified_fallback, CTPIO_UNQUALIFIED_FALLBACK),
	EF10_DMA_STAT(ctpio_runt_fallback, CTPIO_RUNT_FALLBACK),
	EF10_DMA_STAT(ctpio_success, CTPIO_SUCCESS),
	EF10_DMA_STAT(ctpio_fallback, CTPIO_FALLBACK),
	EF10_DMA_STAT(ctpio_poison, CTPIO_POISON),
	EF10_DMA_STAT(ctpio_erase, CTPIO_ERASE),
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) #define HUNT_COMMON_STAT_MASK ((1ULL << EF10_STAT_port_tx_bytes) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) (1ULL << EF10_STAT_port_tx_packets) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) (1ULL << EF10_STAT_port_tx_pause) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) (1ULL << EF10_STAT_port_tx_unicast) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) (1ULL << EF10_STAT_port_tx_multicast) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) (1ULL << EF10_STAT_port_tx_broadcast) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) (1ULL << EF10_STAT_port_rx_bytes) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) (1ULL << \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) EF10_STAT_port_rx_bytes_minus_good_bytes) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) (1ULL << EF10_STAT_port_rx_good_bytes) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) (1ULL << EF10_STAT_port_rx_bad_bytes) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) (1ULL << EF10_STAT_port_rx_packets) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) (1ULL << EF10_STAT_port_rx_good) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) (1ULL << EF10_STAT_port_rx_bad) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) (1ULL << EF10_STAT_port_rx_pause) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) (1ULL << EF10_STAT_port_rx_control) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) (1ULL << EF10_STAT_port_rx_unicast) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) (1ULL << EF10_STAT_port_rx_multicast) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) (1ULL << EF10_STAT_port_rx_broadcast) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) (1ULL << EF10_STAT_port_rx_lt64) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) (1ULL << EF10_STAT_port_rx_64) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) (1ULL << EF10_STAT_port_rx_65_to_127) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) (1ULL << EF10_STAT_port_rx_128_to_255) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) (1ULL << EF10_STAT_port_rx_256_to_511) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) (1ULL << EF10_STAT_port_rx_512_to_1023) |\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) (1ULL << EF10_STAT_port_rx_1024_to_15xx) |\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) (1ULL << EF10_STAT_port_rx_15xx_to_jumbo) |\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) (1ULL << EF10_STAT_port_rx_gtjumbo) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) (1ULL << EF10_STAT_port_rx_bad_gtjumbo) |\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) (1ULL << EF10_STAT_port_rx_overflow) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) (1ULL << EF10_STAT_port_rx_nodesc_drops) |\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) (1ULL << GENERIC_STAT_rx_nodesc_trunc) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) (1ULL << GENERIC_STAT_rx_noskb_drops))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) /* On 7000 series NICs, these statistics are only provided by the 10G MAC.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) * For a 10G/40G switchable port we do not expose these because they might
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) * not include all the packets they should.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) * On 8000 series NICs these statistics are always provided.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) #define HUNT_10G_ONLY_STAT_MASK ((1ULL << EF10_STAT_port_tx_control) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) (1ULL << EF10_STAT_port_tx_lt64) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) (1ULL << EF10_STAT_port_tx_64) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) (1ULL << EF10_STAT_port_tx_65_to_127) |\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) (1ULL << EF10_STAT_port_tx_128_to_255) |\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) (1ULL << EF10_STAT_port_tx_256_to_511) |\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) (1ULL << EF10_STAT_port_tx_512_to_1023) |\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) (1ULL << EF10_STAT_port_tx_1024_to_15xx) |\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) (1ULL << EF10_STAT_port_tx_15xx_to_jumbo))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) /* These statistics are only provided by the 40G MAC. For a 10G/40G
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) * switchable port we do expose these because the errors will otherwise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) * be silent.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) #define HUNT_40G_EXTRA_STAT_MASK ((1ULL << EF10_STAT_port_rx_align_error) |\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) (1ULL << EF10_STAT_port_rx_length_error))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) /* These statistics are only provided if the firmware supports the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) * capability PM_AND_RXDP_COUNTERS.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) #define HUNT_PM_AND_RXDP_STAT_MASK ( \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) (1ULL << EF10_STAT_port_rx_pm_trunc_bb_overflow) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) (1ULL << EF10_STAT_port_rx_pm_discard_bb_overflow) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) (1ULL << EF10_STAT_port_rx_pm_trunc_vfifo_full) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) (1ULL << EF10_STAT_port_rx_pm_discard_vfifo_full) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) (1ULL << EF10_STAT_port_rx_pm_trunc_qbb) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) (1ULL << EF10_STAT_port_rx_pm_discard_qbb) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) (1ULL << EF10_STAT_port_rx_pm_discard_mapping) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) (1ULL << EF10_STAT_port_rx_dp_q_disabled_packets) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) (1ULL << EF10_STAT_port_rx_dp_di_dropped_packets) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) (1ULL << EF10_STAT_port_rx_dp_streaming_packets) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) (1ULL << EF10_STAT_port_rx_dp_hlb_fetch) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) (1ULL << EF10_STAT_port_rx_dp_hlb_wait))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) /* These statistics are only provided if the NIC supports MC_CMD_MAC_STATS_V2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) * indicated by returning a value >= MC_CMD_MAC_NSTATS_V2 in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) * MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) * These bits are in the second u64 of the raw mask.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) #define EF10_FEC_STAT_MASK ( \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) (1ULL << (EF10_STAT_fec_uncorrected_errors - 64)) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) (1ULL << (EF10_STAT_fec_corrected_errors - 64)) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) (1ULL << (EF10_STAT_fec_corrected_symbols_lane0 - 64)) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) (1ULL << (EF10_STAT_fec_corrected_symbols_lane1 - 64)) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) (1ULL << (EF10_STAT_fec_corrected_symbols_lane2 - 64)) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) (1ULL << (EF10_STAT_fec_corrected_symbols_lane3 - 64)))
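
/* The EF10_STAT_fec_* values here, and the EF10_STAT_ctpio_* values
 * below, are all >= 64, so "stat - 64" gives each stat's bit position
 * within raw_mask[1], the second u64 of the raw mask (e.g. a stat with
 * enum value 65 maps to bit 1).
 */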
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) /* These statistics are only provided if the NIC supports MC_CMD_MAC_STATS_V3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) * indicated by returning a value >= MC_CMD_MAC_NSTATS_V3 in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) * MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) * These bits are in the second u64 of the raw mask.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) #define EF10_CTPIO_STAT_MASK ( \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) (1ULL << (EF10_STAT_ctpio_vi_busy_fallback - 64)) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) (1ULL << (EF10_STAT_ctpio_long_write_success - 64)) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) (1ULL << (EF10_STAT_ctpio_missing_dbell_fail - 64)) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) (1ULL << (EF10_STAT_ctpio_overflow_fail - 64)) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) (1ULL << (EF10_STAT_ctpio_underflow_fail - 64)) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) (1ULL << (EF10_STAT_ctpio_timeout_fail - 64)) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) (1ULL << (EF10_STAT_ctpio_noncontig_wr_fail - 64)) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) (1ULL << (EF10_STAT_ctpio_frm_clobber_fail - 64)) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) (1ULL << (EF10_STAT_ctpio_invalid_wr_fail - 64)) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) (1ULL << (EF10_STAT_ctpio_vi_clobber_fallback - 64)) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) (1ULL << (EF10_STAT_ctpio_unqualified_fallback - 64)) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) (1ULL << (EF10_STAT_ctpio_runt_fallback - 64)) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) (1ULL << (EF10_STAT_ctpio_success - 64)) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) (1ULL << (EF10_STAT_ctpio_fallback - 64)) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) (1ULL << (EF10_STAT_ctpio_poison - 64)) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) (1ULL << (EF10_STAT_ctpio_erase - 64)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) static u64 efx_ef10_raw_stat_mask(struct efx_nic *efx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) u64 raw_mask = HUNT_COMMON_STAT_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) u32 port_caps = efx_mcdi_phy_get_caps(efx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) struct efx_ef10_nic_data *nic_data = efx->nic_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679)
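	/* Only the link-control function reports the port MAC statistics;
	 * for any other function the raw mask is left empty.
	 */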
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) if (!(efx->mcdi->fn_flags &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) 1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) if (port_caps & (1 << MC_CMD_PHY_CAP_40000FDX_LBN)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) raw_mask |= HUNT_40G_EXTRA_STAT_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) /* 8000 series have everything even at 40G */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) if (nic_data->datapath_caps2 &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) (1 << MC_CMD_GET_CAPABILITIES_V2_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) raw_mask |= HUNT_10G_ONLY_STAT_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) raw_mask |= HUNT_10G_ONLY_STAT_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) if (nic_data->datapath_caps &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) (1 << MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_LBN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) raw_mask |= HUNT_PM_AND_RXDP_STAT_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) return raw_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) static void efx_ef10_get_stat_mask(struct efx_nic *efx, unsigned long *mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) struct efx_ef10_nic_data *nic_data = efx->nic_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) u64 raw_mask[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) raw_mask[0] = efx_ef10_raw_stat_mask(efx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) /* Only show vadaptor stats when EVB capability is present */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) if (nic_data->datapath_caps &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) (1 << MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN)) {
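		/* Set every bit from EF10_STAT_rx_unicast up to bit 63,
		 * i.e. all the vadaptor stats held in the first u64.
		 */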
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) raw_mask[0] |= ~((1ULL << EF10_STAT_rx_unicast) - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) raw_mask[1] = (1ULL << (EF10_STAT_V1_COUNT - 64)) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) raw_mask[1] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) /* Only show FEC stats when NIC supports MC_CMD_MAC_STATS_V2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) if (efx->num_mac_stats >= MC_CMD_MAC_NSTATS_V2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) raw_mask[1] |= EF10_FEC_STAT_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) /* CTPIO stats appear in V3. Only show them on devices that actually
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721)  * support CTPIO. Although this driver doesn't use CTPIO, others might,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) * and we may be reporting the stats for the underlying port.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) if (efx->num_mac_stats >= MC_CMD_MAC_NSTATS_V3 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) (nic_data->datapath_caps2 &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) (1 << MC_CMD_GET_CAPABILITIES_V4_OUT_CTPIO_LBN)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) raw_mask[1] |= EF10_CTPIO_STAT_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728)
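	/* Fold the two u64 halves of the raw mask into the unsigned long
	 * bitmap used by for_each_set_bit(). On 32-bit builds only three
	 * words are needed, since EF10_STAT_COUNT fits within 96 bits:
	 * e.g. raw bit 70 (bit 6 of raw_mask[1]) lands in bit 6 of
	 * mask[1] on 64-bit builds and bit 6 of mask[2] on 32-bit builds.
	 */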
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) #if BITS_PER_LONG == 64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) BUILD_BUG_ON(BITS_TO_LONGS(EF10_STAT_COUNT) != 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) mask[0] = raw_mask[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) mask[1] = raw_mask[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) BUILD_BUG_ON(BITS_TO_LONGS(EF10_STAT_COUNT) != 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) mask[0] = raw_mask[0] & 0xffffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) mask[1] = raw_mask[0] >> 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) mask[2] = raw_mask[1] & 0xffffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) static size_t efx_ef10_describe_stats(struct efx_nic *efx, u8 *names)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) DECLARE_BITMAP(mask, EF10_STAT_COUNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) efx_ef10_get_stat_mask(efx, mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) return efx_nic_describe_stats(efx_ef10_stat_desc, EF10_STAT_COUNT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) mask, names);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) static size_t efx_ef10_update_stats_common(struct efx_nic *efx, u64 *full_stats,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) struct rtnl_link_stats64 *core_stats)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) DECLARE_BITMAP(mask, EF10_STAT_COUNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) struct efx_ef10_nic_data *nic_data = efx->nic_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) u64 *stats = nic_data->stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) size_t stats_count = 0, index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) efx_ef10_get_stat_mask(efx, mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) if (full_stats) {
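		/* Only stats with a populated descriptor name are copied
		 * out; unnamed entries are maintained internally only.
		 */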
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) for_each_set_bit(index, mask, EF10_STAT_COUNT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) if (efx_ef10_stat_desc[index].name) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) *full_stats++ = stats[index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) ++stats_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) if (!core_stats)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) return stats_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) if (nic_data->datapath_caps &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) 1 << MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) /* Use vadaptor stats. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) core_stats->rx_packets = stats[EF10_STAT_rx_unicast] +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) stats[EF10_STAT_rx_multicast] +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) stats[EF10_STAT_rx_broadcast];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) core_stats->tx_packets = stats[EF10_STAT_tx_unicast] +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) stats[EF10_STAT_tx_multicast] +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) stats[EF10_STAT_tx_broadcast];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) core_stats->rx_bytes = stats[EF10_STAT_rx_unicast_bytes] +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) stats[EF10_STAT_rx_multicast_bytes] +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) stats[EF10_STAT_rx_broadcast_bytes];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) core_stats->tx_bytes = stats[EF10_STAT_tx_unicast_bytes] +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) stats[EF10_STAT_tx_multicast_bytes] +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) stats[EF10_STAT_tx_broadcast_bytes];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) core_stats->rx_dropped = stats[GENERIC_STAT_rx_nodesc_trunc] +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) stats[GENERIC_STAT_rx_noskb_drops];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) core_stats->multicast = stats[EF10_STAT_rx_multicast];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) core_stats->rx_crc_errors = stats[EF10_STAT_rx_bad];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) core_stats->rx_fifo_errors = stats[EF10_STAT_rx_overflow];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) core_stats->rx_errors = core_stats->rx_crc_errors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) core_stats->tx_errors = stats[EF10_STAT_tx_bad];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) /* Use port stats. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) core_stats->rx_packets = stats[EF10_STAT_port_rx_packets];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) core_stats->tx_packets = stats[EF10_STAT_port_tx_packets];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) core_stats->rx_bytes = stats[EF10_STAT_port_rx_bytes];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) core_stats->tx_bytes = stats[EF10_STAT_port_tx_bytes];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) core_stats->rx_dropped = stats[EF10_STAT_port_rx_nodesc_drops] +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) stats[GENERIC_STAT_rx_nodesc_trunc] +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) stats[GENERIC_STAT_rx_noskb_drops];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) core_stats->multicast = stats[EF10_STAT_port_rx_multicast];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) core_stats->rx_length_errors =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) stats[EF10_STAT_port_rx_gtjumbo] +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) stats[EF10_STAT_port_rx_length_error];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) core_stats->rx_crc_errors = stats[EF10_STAT_port_rx_bad];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) core_stats->rx_frame_errors =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) stats[EF10_STAT_port_rx_align_error];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) core_stats->rx_fifo_errors = stats[EF10_STAT_port_rx_overflow];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) core_stats->rx_errors = (core_stats->rx_length_errors +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) core_stats->rx_crc_errors +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) core_stats->rx_frame_errors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) return stats_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) static size_t efx_ef10_update_stats_pf(struct efx_nic *efx, u64 *full_stats,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) struct rtnl_link_stats64 *core_stats)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) struct efx_ef10_nic_data *nic_data = efx->nic_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) DECLARE_BITMAP(mask, EF10_STAT_COUNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) u64 *stats = nic_data->stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) efx_ef10_get_stat_mask(efx, mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) efx_nic_copy_stats(efx, nic_data->mc_stats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) mask, stats, nic_data->mc_stats, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) /* Update derived statistics */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) efx_nic_fix_nodesc_drop_stat(efx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) &stats[EF10_STAT_port_rx_nodesc_drops]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) /* MC Firmware reads RX_BYTES and RX_GOOD_BYTES from the MAC.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) * It then calculates RX_BAD_BYTES and DMAs it to us with RX_BYTES.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) * We report these as port_rx_ stats. We are not given RX_GOOD_BYTES.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) * Here we calculate port_rx_good_bytes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) stats[EF10_STAT_port_rx_good_bytes] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) stats[EF10_STAT_port_rx_bytes] -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) stats[EF10_STAT_port_rx_bytes_minus_good_bytes];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) /* The asynchronous reads used to calculate RX_BAD_BYTES in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) * MC Firmware are done such that we should not see an increase in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) * RX_BAD_BYTES when a good packet has arrived. Unfortunately this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) * does mean that the stat can decrease at times. Here we do not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) * update the stat unless it has increased or has gone to zero
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849)  * (in the case of the NIC rebooting).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) * Please see Bug 33781 for a discussion of why things work this way.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) efx_update_diff_stat(&stats[EF10_STAT_port_rx_bad_bytes],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) stats[EF10_STAT_port_rx_bytes_minus_good_bytes]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) efx_update_sw_stats(efx, stats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) return efx_ef10_update_stats_common(efx, full_stats, core_stats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) static int efx_ef10_try_update_nic_stats_vf(struct efx_nic *efx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) __must_hold(&efx->stats_lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) MCDI_DECLARE_BUF(inbuf, MC_CMD_MAC_STATS_IN_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) struct efx_ef10_nic_data *nic_data = efx->nic_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) DECLARE_BITMAP(mask, EF10_STAT_COUNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) __le64 generation_start, generation_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) u64 *stats = nic_data->stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) u32 dma_len = efx->num_mac_stats * sizeof(u64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) struct efx_buffer stats_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) __le64 *dma_stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) spin_unlock_bh(&efx->stats_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) efx_ef10_get_stat_mask(efx, mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) rc = efx_nic_alloc_buffer(efx, &stats_buf, dma_len, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) spin_lock_bh(&efx->stats_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) dma_stats = stats_buf.addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) dma_stats[efx->num_mac_stats - 1] = EFX_MC_STATS_GENERATION_INVALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) MCDI_SET_QWORD(inbuf, MAC_STATS_IN_DMA_ADDR, stats_buf.dma_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) MCDI_POPULATE_DWORD_1(inbuf, MAC_STATS_IN_CMD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) MAC_STATS_IN_DMA, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) MCDI_SET_DWORD(inbuf, MAC_STATS_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) rc = efx_mcdi_rpc_quiet(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) NULL, 0, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) spin_lock_bh(&efx->stats_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) /* Expect ENOENT if DMA queues have not been set up */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) if (rc != -ENOENT || atomic_read(&efx->active_queues))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) efx_mcdi_display_error(efx, MC_CMD_MAC_STATS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) sizeof(inbuf), NULL, 0, rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901)
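	/* The MC brackets the DMAed stats with generation counts. If
	 * GENERATION_END (read before the stats) does not match
	 * GENERATION_START (read after them), the MC rewrote the buffer
	 * while we were copying it, so the read is abandoned with -EAGAIN.
	 */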
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) generation_end = dma_stats[efx->num_mac_stats - 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) if (generation_end == EFX_MC_STATS_GENERATION_INVALID) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) WARN_ON_ONCE(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) rmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) stats, stats_buf.addr, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) rmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) generation_start = dma_stats[MC_CMD_MAC_GENERATION_START];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) if (generation_end != generation_start) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) rc = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) efx_update_sw_stats(efx, stats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) efx_nic_free_buffer(efx, &stats_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) static size_t efx_ef10_update_stats_vf(struct efx_nic *efx, u64 *full_stats,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) struct rtnl_link_stats64 *core_stats)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) if (efx_ef10_try_update_nic_stats_vf(efx))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) return efx_ef10_update_stats_common(efx, full_stats, core_stats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) static size_t efx_ef10_update_stats_atomic_vf(struct efx_nic *efx, u64 *full_stats,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) struct rtnl_link_stats64 *core_stats)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) struct efx_ef10_nic_data *nic_data = efx->nic_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) /* In atomic context, cannot update HW stats. Just update the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) * software stats and return so the caller can continue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) efx_update_sw_stats(efx, nic_data->stats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) return efx_ef10_update_stats_common(efx, full_stats, core_stats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) static void efx_ef10_push_irq_moderation(struct efx_channel *channel)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) struct efx_nic *efx = channel->efx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) unsigned int mode, usecs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) efx_dword_t timer_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949)
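	/* Timer mode 3 is interrupt hold-off; mode 0 leaves the event
	 * queue timer disabled.
	 */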
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) if (channel->irq_moderation_us) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) mode = 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) usecs = channel->irq_moderation_us;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) mode = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) usecs = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) if (EFX_EF10_WORKAROUND_61265(efx)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_EVQ_TMR_IN_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) unsigned int ns = usecs * 1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) MCDI_SET_DWORD(inbuf, SET_EVQ_TMR_IN_INSTANCE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) channel->channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) MCDI_SET_DWORD(inbuf, SET_EVQ_TMR_IN_TMR_LOAD_REQ_NS, ns);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) MCDI_SET_DWORD(inbuf, SET_EVQ_TMR_IN_TMR_RELOAD_REQ_NS, ns);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) MCDI_SET_DWORD(inbuf, SET_EVQ_TMR_IN_TMR_MODE, mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) efx_mcdi_rpc_async(efx, MC_CMD_SET_EVQ_TMR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) inbuf, sizeof(inbuf), 0, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) } else if (EFX_EF10_WORKAROUND_35388(efx)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) unsigned int ticks = efx_usecs_to_ticks(efx, usecs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) EFX_POPULATE_DWORD_3(timer_cmd, ERF_DD_EVQ_IND_TIMER_FLAGS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) EFE_DD_EVQ_IND_TIMER_FLAGS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) ERF_DD_EVQ_IND_TIMER_MODE, mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) ERF_DD_EVQ_IND_TIMER_VAL, ticks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) efx_writed_page(efx, &timer_cmd, ER_DD_EVQ_INDIRECT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) channel->channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) unsigned int ticks = efx_usecs_to_ticks(efx, usecs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) EFX_POPULATE_DWORD_3(timer_cmd, ERF_DZ_TC_TIMER_MODE, mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) ERF_DZ_TC_TIMER_VAL, ticks,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) ERF_FZ_TC_TMR_REL_VAL, ticks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) efx_writed_page(efx, &timer_cmd, ER_DZ_EVQ_TMR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) channel->channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) static void efx_ef10_get_wol_vf(struct efx_nic *efx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) struct ethtool_wolinfo *wol) {}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) static int efx_ef10_set_wol_vf(struct efx_nic *efx, u32 type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) static void efx_ef10_get_wol(struct efx_nic *efx, struct ethtool_wolinfo *wol)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) wol->supported = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) wol->wolopts = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) memset(&wol->sopass, 0, sizeof(wol->sopass));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) static int efx_ef10_set_wol(struct efx_nic *efx, u32 type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) if (type != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) static void efx_ef10_mcdi_request(struct efx_nic *efx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) const efx_dword_t *hdr, size_t hdr_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) const efx_dword_t *sdu, size_t sdu_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) struct efx_ef10_nic_data *nic_data = efx->nic_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) u8 *pdu = nic_data->mcdi_buf.addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) memcpy(pdu, hdr, hdr_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) memcpy(pdu + hdr_len, sdu, sdu_len);
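	/* Make sure the request body is fully written before the
	 * doorbell write below lets the MC start reading it.
	 */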
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) /* The hardware provides 'low' and 'high' (doorbell) registers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) * for passing the 64-bit address of an MCDI request to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) * firmware. However the dwords are swapped by firmware. The
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) * least significant bits of the doorbell are then 0 for all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) * MCDI requests due to alignment.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) _efx_writed(efx, cpu_to_le32((u64)nic_data->mcdi_buf.dma_addr >> 32),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) ER_DZ_MC_DB_LWRD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) _efx_writed(efx, cpu_to_le32((u32)nic_data->mcdi_buf.dma_addr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) ER_DZ_MC_DB_HWRD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) static bool efx_ef10_mcdi_poll_response(struct efx_nic *efx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) struct efx_ef10_nic_data *nic_data = efx->nic_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) const efx_dword_t hdr = *(const efx_dword_t *)nic_data->mcdi_buf.addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) rmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) return EFX_DWORD_FIELD(hdr, MCDI_HEADER_RESPONSE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) efx_ef10_mcdi_read_response(struct efx_nic *efx, efx_dword_t *outbuf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) size_t offset, size_t outlen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) struct efx_ef10_nic_data *nic_data = efx->nic_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) const u8 *pdu = nic_data->mcdi_buf.addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) memcpy(outbuf, pdu + offset, outlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) static void efx_ef10_mcdi_reboot_detected(struct efx_nic *efx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) struct efx_ef10_nic_data *nic_data = efx->nic_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) /* All our allocations have been reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) efx_ef10_table_reset_mc_allocations(efx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) /* The datapath firmware might have been changed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) nic_data->must_check_datapath_caps = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) /* MAC statistics have been cleared on the NIC; clear the local
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) * statistic that we update with efx_update_diff_stat().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) nic_data->stats[EF10_STAT_port_rx_bad_bytes] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) static int efx_ef10_mcdi_poll_reboot(struct efx_nic *efx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) struct efx_ef10_nic_data *nic_data = efx->nic_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) rc = efx_ef10_get_warm_boot_count(efx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) if (rc < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) /* The firmware is presumably in the process of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) * rebooting. However, we are supposed to report each
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) * reboot just once, so we must only do that once we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) * can read and store the updated warm boot count.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) if (rc == nic_data->warm_boot_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) nic_data->warm_boot_count = rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) efx_ef10_mcdi_reboot_detected(efx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) /* Handle an MSI interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) * Handle an MSI hardware interrupt. This routine schedules event
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) * queue processing. No interrupt acknowledgement cycle is necessary.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) * Also, we never need to check that the interrupt is for us, since
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) * MSI interrupts cannot be shared.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) static irqreturn_t efx_ef10_msi_interrupt(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) struct efx_msi_context *context = dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) struct efx_nic *efx = context->efx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) netif_vdbg(efx, intr, efx->net_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) "IRQ %d on CPU %d\n", irq, raw_smp_processor_id());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) if (likely(READ_ONCE(efx->irq_soft_enabled))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) /* Note test interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) if (context->index == efx->irq_level)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) efx->last_irq_cpu = raw_smp_processor_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) /* Schedule processing of the channel */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) efx_schedule_channel_irq(efx->channel[context->index]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) static irqreturn_t efx_ef10_legacy_interrupt(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) struct efx_nic *efx = dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) bool soft_enabled = READ_ONCE(efx->irq_soft_enabled);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) struct efx_channel *channel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) efx_dword_t reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) u32 queues;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) /* Read the ISR which also ACKs the interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) efx_readd(efx, ®, ER_DZ_BIU_INT_ISR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) queues = EFX_DWORD_FIELD(reg, ERF_DZ_ISR_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) if (queues == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) return IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) if (likely(soft_enabled)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) /* Note test interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) if (queues & (1U << efx->irq_level))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) efx->last_irq_cpu = raw_smp_processor_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140)
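		/* Each ISR bit corresponds to one event queue, in channel
		 * order; schedule every channel whose bit is set.
		 */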
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) efx_for_each_channel(channel, efx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) if (queues & 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) efx_schedule_channel_irq(channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) queues >>= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) netif_vdbg(efx, intr, efx->net_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) static int efx_ef10_irq_test_generate(struct efx_nic *efx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) MCDI_DECLARE_BUF(inbuf, MC_CMD_TRIGGER_INTERRUPT_IN_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158)
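	/* If the firmware accepts the bug 41750 workaround, this is
	 * presumably firmware on which MC_CMD_TRIGGER_INTERRUPT does not
	 * work, so report the interrupt self-test as unsupported.
	 */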
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) if (efx_mcdi_set_workaround(efx, MC_CMD_WORKAROUND_BUG41750, true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) NULL) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) return -ENOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) BUILD_BUG_ON(MC_CMD_TRIGGER_INTERRUPT_OUT_LEN != 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) MCDI_SET_DWORD(inbuf, TRIGGER_INTERRUPT_IN_INTR_LEVEL, efx->irq_level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) return efx_mcdi_rpc(efx, MC_CMD_TRIGGER_INTERRUPT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) inbuf, sizeof(inbuf), NULL, 0, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) static int efx_ef10_tx_probe(struct efx_tx_queue *tx_queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) /* low two bits of label are what we want for type */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) BUILD_BUG_ON((EFX_TXQ_TYPE_OUTER_CSUM | EFX_TXQ_TYPE_INNER_CSUM) != 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) tx_queue->type = tx_queue->label & 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) return efx_nic_alloc_buffer(tx_queue->efx, &tx_queue->txd.buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) (tx_queue->ptr_mask + 1) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) sizeof(efx_qword_t),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) /* This writes to the TX_DESC_WPTR and also pushes data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) static inline void efx_ef10_push_tx_desc(struct efx_tx_queue *tx_queue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) const efx_qword_t *txd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) unsigned int write_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) efx_oword_t reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
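	/* Build a 16-byte value with the write pointer in the high qword
	 * and the descriptor itself in the low qword, so that a single
	 * oword write pushes the descriptor and updates TX_DESC_WPTR.
	 */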
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) EFX_POPULATE_OWORD_1(reg, ERF_DZ_TX_DESC_WPTR, write_ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) reg.qword[0] = *txd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) efx_writeo_page(tx_queue->efx, ®,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) ER_DZ_TX_DESC_UPD, tx_queue->queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) /* Add Firmware-Assisted TSO v2 option descriptors to a queue. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) int efx_ef10_tx_tso_desc(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) bool *data_mapped)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) struct efx_tx_buffer *buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) u16 inner_ipv4_id = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) u16 outer_ipv4_id = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) struct tcphdr *tcp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) struct iphdr *ip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) u16 ip_tot_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) u32 seqnum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) u32 mss;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) EFX_WARN_ON_ONCE_PARANOID(tx_queue->tso_version != 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) mss = skb_shinfo(skb)->gso_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) if (unlikely(mss < 4)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) WARN_ONCE(1, "MSS of %u is too small for TSO v2\n", mss);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) if (skb->encapsulation) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) if (!tx_queue->tso_encap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) ip = ip_hdr(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) if (ip->version == 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) outer_ipv4_id = ntohs(ip->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) ip = inner_ip_hdr(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) tcp = inner_tcp_hdr(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) ip = ip_hdr(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) tcp = tcp_hdr(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) /* 8000-series EF10 hardware requires that IP Total Length be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) * greater than or equal to the value it will have in each segment
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) * (which is at most mss + 208 + TCP header length), but also less
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) * than (0x10000 - inner_network_header). Otherwise the TCP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) * checksum calculation will be broken for encapsulated packets.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) * We fill in ip->tot_len with 0xff30, which should satisfy the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) * first requirement unless the MSS is ridiculously large (which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) * should be impossible as the driver max MTU is 9216); it is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) * guaranteed to satisfy the second as we only attempt TSO if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) * inner_network_header <= 208.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) 	ip_tot_len = 0x10000 - EFX_TSO2_MAX_HDRLEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) EFX_WARN_ON_ONCE_PARANOID(mss + EFX_TSO2_MAX_HDRLEN +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) (tcp->doff << 2u) > ip_tot_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) if (ip->version == 4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) ip->tot_len = htons(ip_tot_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) ip->check = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) inner_ipv4_id = ntohs(ip->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) ((struct ipv6hdr *)ip)->payload_len = htons(ip_tot_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) seqnum = ntohl(tcp->seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256)
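	/* TSO v2 consumes a pair of option descriptors: FATSO2A carries
	 * the inner IP ID and TCP sequence number, FATSO2B the outer
	 * IP ID and the MSS.
	 */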
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) buffer = efx_tx_queue_get_insert_buffer(tx_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) buffer->flags = EFX_TX_BUF_OPTION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) buffer->len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) buffer->unmap_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) EFX_POPULATE_QWORD_5(buffer->option,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) ESF_DZ_TX_DESC_IS_OPT, 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_TSO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) ESF_DZ_TX_TSO_OPTION_TYPE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) ESE_DZ_TX_TSO_OPTION_DESC_FATSO2A,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) ESF_DZ_TX_TSO_IP_ID, inner_ipv4_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) ESF_DZ_TX_TSO_TCP_SEQNO, seqnum
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) );
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) ++tx_queue->insert_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) buffer = efx_tx_queue_get_insert_buffer(tx_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) buffer->flags = EFX_TX_BUF_OPTION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) buffer->len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) buffer->unmap_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) EFX_POPULATE_QWORD_5(buffer->option,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) ESF_DZ_TX_DESC_IS_OPT, 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_TSO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) ESF_DZ_TX_TSO_OPTION_TYPE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) ESE_DZ_TX_TSO_OPTION_DESC_FATSO2B,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) ESF_DZ_TX_TSO_OUTER_IPID, outer_ipv4_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) ESF_DZ_TX_TSO_TCP_MSS, mss
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) );
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) ++tx_queue->insert_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) static u32 efx_ef10_tso_versions(struct efx_nic *efx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) struct efx_ef10_nic_data *nic_data = efx->nic_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) u32 tso_versions = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294)
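/* Build a bitmask of the TSO versions the firmware supports:
 * bit n set means TSO vn is available.
 */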
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) if (nic_data->datapath_caps &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) (1 << MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) tso_versions |= BIT(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) if (nic_data->datapath_caps2 &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) (1 << MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_LBN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) tso_versions |= BIT(2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) return tso_versions;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) bool csum_offload = tx_queue->type & EFX_TXQ_TYPE_OUTER_CSUM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) bool inner_csum = tx_queue->type & EFX_TXQ_TYPE_INNER_CSUM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) struct efx_channel *channel = tx_queue->channel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) struct efx_nic *efx = tx_queue->efx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) struct efx_ef10_nic_data *nic_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) efx_qword_t *txd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) nic_data = efx->nic_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) /* Only attempt to enable TX timestamping if we have the license for it,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) * otherwise TXQ init will fail
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) if (!(nic_data->licensed_features &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) (1 << LICENSED_V3_FEATURES_TX_TIMESTAMPS_LBN))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) tx_queue->timestamping = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) /* Disable sync events on this channel. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) if (efx->type->ptp_set_ts_sync_events)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) efx->type->ptp_set_ts_sync_events(efx, false, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) /* TSOv2 is a limited resource that can only be configured on a limited
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) * number of queues. TSO without checksum offload is not really a thing,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) * so we only enable it for those queues.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) * TSOv2 cannot be used with Hardware timestamping, and is never needed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) * for XDP tx.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) if (efx_has_cap(efx, TX_TSO_V2)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) if ((csum_offload || inner_csum) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) !tx_queue->timestamping && !tx_queue->xdp_tx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) tx_queue->tso_version = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) netif_dbg(efx, hw, efx->net_dev, "Using TSOv2 for channel %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) channel->channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) } else if (efx_has_cap(efx, TX_TSO)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) tx_queue->tso_version = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) rc = efx_mcdi_tx_init(tx_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) /* A previous user of this TX queue might have set us up the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) * bomb by writing a descriptor to the TX push collector but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) * not the doorbell. (Each collector belongs to a port, not a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) * queue or function, so cannot easily be reset.) We must
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) * attempt to push a no-op descriptor in its place.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) tx_queue->buffer[0].flags = EFX_TX_BUF_OPTION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) tx_queue->insert_count = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) txd = efx_tx_desc(tx_queue, 0);
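/* The no-op is a CRC_CSUM option descriptor, which also establishes the
 * queue's checksum-offload and timestamping options. The IP-header
 * checksum bits are left clear on TSOv2 queues, where the FATSO2
 * descriptors perform the per-segment IP header updates instead.
 */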
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) EFX_POPULATE_QWORD_7(*txd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) ESF_DZ_TX_DESC_IS_OPT, true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) ESF_DZ_TX_OPTION_TYPE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) ESE_DZ_TX_OPTION_DESC_CRC_CSUM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) ESF_DZ_TX_OPTION_UDP_TCP_CSUM, csum_offload,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) ESF_DZ_TX_OPTION_IP_CSUM, csum_offload && tx_queue->tso_version != 2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) ESF_DZ_TX_OPTION_INNER_UDP_TCP_CSUM, inner_csum,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) ESF_DZ_TX_OPTION_INNER_IP_CSUM, inner_csum && tx_queue->tso_version != 2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) ESF_DZ_TX_TIMESTAMP, tx_queue->timestamping);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) tx_queue->write_count = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) if (tx_queue->tso_version == 2 && efx_has_cap(efx, TX_TSO_V2_ENCAP))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) tx_queue->tso_encap = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) efx_ef10_push_tx_desc(tx_queue, txd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) netdev_WARN(efx->net_dev, "failed to initialise TXQ %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) tx_queue->queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) /* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) static inline void efx_ef10_notify_tx_desc(struct efx_tx_queue *tx_queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) unsigned int write_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) efx_dword_t reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) EFX_POPULATE_DWORD_1(reg, ERF_DZ_TX_DESC_WPTR_DWORD, write_ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) efx_writed_page(tx_queue->efx, ®,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) ER_DZ_TX_DESC_UPD_DWORD, tx_queue->queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) #define EFX_EF10_MAX_TX_DESCRIPTOR_LEN 0x3fff
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) static unsigned int efx_ef10_tx_limit_len(struct efx_tx_queue *tx_queue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) dma_addr_t dma_addr, unsigned int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) if (len > EFX_EF10_MAX_TX_DESCRIPTOR_LEN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) /* If we need to break across multiple descriptors we should
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) * stop at a page boundary. This assumes the length limit is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) * greater than the page size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) */
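/* Worked example, assuming 4 KiB pages: dma_addr == 0x12340ff0 gives
 * end == 0x12344fef, which masks down to 0x12344000, so this descriptor
 * carries len == 0x3010 bytes and finishes exactly on a page boundary.
 */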
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) dma_addr_t end = dma_addr + EFX_EF10_MAX_TX_DESCRIPTOR_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) BUILD_BUG_ON(EFX_EF10_MAX_TX_DESCRIPTOR_LEN < EFX_PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) len = (end & (~(EFX_PAGE_SIZE - 1))) - dma_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) return len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) static void efx_ef10_tx_write(struct efx_tx_queue *tx_queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) unsigned int old_write_count = tx_queue->write_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) struct efx_tx_buffer *buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) unsigned int write_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) efx_qword_t *txd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) tx_queue->xmit_pending = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) if (unlikely(tx_queue->write_count == tx_queue->insert_count))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) buffer = &tx_queue->buffer[write_ptr];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) txd = efx_tx_desc(tx_queue, write_ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) ++tx_queue->write_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) /* Create TX descriptor ring entry */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) if (buffer->flags & EFX_TX_BUF_OPTION) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) *txd = buffer->option;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) if (EFX_QWORD_FIELD(*txd, ESF_DZ_TX_OPTION_TYPE) == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) /* PIO descriptor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) tx_queue->packet_write_count = tx_queue->write_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) tx_queue->packet_write_count = tx_queue->write_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) BUILD_BUG_ON(EFX_TX_BUF_CONT != 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) EFX_POPULATE_QWORD_3(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) *txd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) ESF_DZ_TX_KER_CONT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) buffer->flags & EFX_TX_BUF_CONT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) ESF_DZ_TX_KER_BYTE_CNT, buffer->len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) ESF_DZ_TX_KER_BUF_ADDR, buffer->dma_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) } while (tx_queue->write_count != tx_queue->insert_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) wmb(); /* Ensure descriptors are written before they are fetched */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448)
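/* If exactly one descriptor was added to an empty queue, it can be pushed
 * inline with the doorbell write to cut latency; otherwise just notify the
 * new write pointer.
 */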
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) if (efx_nic_may_push_tx_desc(tx_queue, old_write_count)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) txd = efx_tx_desc(tx_queue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) old_write_count & tx_queue->ptr_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) efx_ef10_push_tx_desc(tx_queue, txd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) ++tx_queue->pushes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) efx_ef10_notify_tx_desc(tx_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) static int efx_ef10_probe_multicast_chaining(struct efx_nic *efx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) struct efx_ef10_nic_data *nic_data = efx->nic_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) unsigned int enabled, implemented;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) bool want_workaround_26807;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) rc = efx_mcdi_get_workarounds(efx, &implemented, &enabled);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) if (rc == -ENOSYS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) /* GET_WORKAROUNDS predates this workaround, so firmware
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) * lacking the command cannot implement the workaround either.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) nic_data->workaround_26807 = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) want_workaround_26807 =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) implemented & MC_CMD_GET_WORKAROUNDS_OUT_BUG26807;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) nic_data->workaround_26807 =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) !!(enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG26807);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480)
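/* Enabling the bug 26807 workaround (multicast filter chaining) may make
 * the MC reset other functions on this NIC; the FLR_DONE flag reports
 * that, so we can resynchronise our warm boot count below.
 */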
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) if (want_workaround_26807 && !nic_data->workaround_26807) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) unsigned int flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) rc = efx_mcdi_set_workaround(efx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) MC_CMD_WORKAROUND_BUG26807,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) true, &flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) if (!rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) if (flags &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) 1 << MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) netif_info(efx, drv, efx->net_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) "other functions on NIC have been reset\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) /* With MCFW v4.6.x and earlier, the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) * boot count will have incremented,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) * so re-read the warm_boot_count
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) * value now to ensure this function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) * doesn't think it has changed next
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) * time it checks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) rc = efx_ef10_get_warm_boot_count(efx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) if (rc >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) nic_data->warm_boot_count = rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) nic_data->workaround_26807 = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) } else if (rc == -EPERM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) static int efx_ef10_filter_table_probe(struct efx_nic *efx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) struct efx_ef10_nic_data *nic_data = efx->nic_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) int rc = efx_ef10_probe_multicast_chaining(efx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) struct efx_mcdi_filter_vlan *vlan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) rc = efx_mcdi_filter_table_probe(efx, nic_data->workaround_26807);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) list_for_each_entry(vlan, &nic_data->vlan_list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) rc = efx_mcdi_filter_add_vlan(efx, vlan->vid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) goto fail_add_vlan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) fail_add_vlan:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) efx_mcdi_filter_table_remove(efx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) /* This creates an entry in the RX descriptor queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) static inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) efx_ef10_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) struct efx_rx_buffer *rx_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) efx_qword_t *rxd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) rxd = efx_rx_desc(rx_queue, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) rx_buf = efx_rx_buffer(rx_queue, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) EFX_POPULATE_QWORD_2(*rxd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) ESF_DZ_RX_KER_BYTE_CNT, rx_buf->len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) ESF_DZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) static void efx_ef10_rx_write(struct efx_rx_queue *rx_queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) struct efx_nic *efx = rx_queue->efx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) unsigned int write_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) efx_dword_t reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) /* Firmware requires that RX_DESC_WPTR be a multiple of 8 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) write_count = rx_queue->added_count & ~7;
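/* e.g. added_count == 61 advertises only 56 descriptors; the last 5 are
 * held back until 3 more arrive to complete the next multiple of 8.
 */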
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) if (rx_queue->notified_count == write_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) do
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) efx_ef10_build_rx_desc(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) rx_queue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) rx_queue->notified_count & rx_queue->ptr_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) while (++rx_queue->notified_count != write_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) EFX_POPULATE_DWORD_1(reg, ERF_DZ_RX_DESC_WPTR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) write_count & rx_queue->ptr_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) efx_writed_page(efx, ®, ER_DZ_RX_DESC_UPD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) efx_rx_queue_index(rx_queue));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) static efx_mcdi_async_completer efx_ef10_rx_defer_refill_complete;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) static void efx_ef10_rx_defer_refill(struct efx_rx_queue *rx_queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) MCDI_DECLARE_BUF(inbuf, MC_CMD_DRIVER_EVENT_IN_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) efx_qword_t event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) EFX_POPULATE_QWORD_2(event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) ESF_DZ_EV_CODE, EFX_EF10_DRVGEN_EV,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) ESF_DZ_EV_DATA, EFX_EF10_REFILL);
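/* The MC will deliver this event back on our own event queue, where it
 * resurfaces as a driver-generated event and is handled as EFX_EF10_REFILL
 * by efx_ef10_handle_driver_generated_event().
 */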
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) MCDI_SET_DWORD(inbuf, DRIVER_EVENT_IN_EVQ, channel->channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) /* MCDI_SET_QWORD is not appropriate here since EFX_POPULATE_* has
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) * already swapped the data to little-endian order.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) memcpy(MCDI_PTR(inbuf, DRIVER_EVENT_IN_DATA), &event.u64[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) sizeof(efx_qword_t));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) efx_mcdi_rpc_async(channel->efx, MC_CMD_DRIVER_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) inbuf, sizeof(inbuf), 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) efx_ef10_rx_defer_refill_complete, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) efx_ef10_rx_defer_refill_complete(struct efx_nic *efx, unsigned long cookie,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) int rc, efx_dword_t *outbuf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) size_t outlen_actual)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) /* nothing to do */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) static int efx_ef10_ev_init(struct efx_channel *channel)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) struct efx_nic *efx = channel->efx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) struct efx_ef10_nic_data *nic_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) bool use_v2, cut_thru;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) nic_data = efx->nic_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) use_v2 = nic_data->datapath_caps2 &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) 1 << MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_V2_LBN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) cut_thru = !(nic_data->datapath_caps &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) 1 << MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN);
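/* Event cut-through is incompatible with RX event batching, so only
 * request it when the firmware cannot batch RX completions.
 */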
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) return efx_mcdi_ev_init(channel, cut_thru, use_v2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) static void efx_ef10_handle_rx_wrong_queue(struct efx_rx_queue *rx_queue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) unsigned int rx_queue_label)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) struct efx_nic *efx = rx_queue->efx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) netif_info(efx, hw, efx->net_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) "rx event arrived on queue %d labeled as queue %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) efx_rx_queue_index(rx_queue), rx_queue_label);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) efx_schedule_reset(efx, RESET_TYPE_DISABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) efx_ef10_handle_rx_bad_lbits(struct efx_rx_queue *rx_queue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) unsigned int actual, unsigned int expected)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) unsigned int dropped = (actual - expected) & rx_queue->ptr_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) struct efx_nic *efx = rx_queue->efx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) netif_info(efx, hw, efx->net_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) "dropped %d events (index=%d expected=%d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) dropped, actual, expected);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) efx_schedule_reset(efx, RESET_TYPE_DISABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) /* partially received RX was aborted. clean up. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) static void efx_ef10_handle_rx_abort(struct efx_rx_queue *rx_queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) unsigned int rx_desc_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) netif_dbg(rx_queue->efx, hw, rx_queue->efx->net_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) "scattered RX aborted (dropping %u buffers)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) rx_queue->scatter_n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) rx_desc_ptr = rx_queue->removed_count & rx_queue->ptr_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) efx_rx_packet(rx_queue, rx_desc_ptr, rx_queue->scatter_n,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) 0, EFX_RX_PKT_DISCARD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) rx_queue->removed_count += rx_queue->scatter_n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) rx_queue->scatter_n = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) rx_queue->scatter_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) ++efx_rx_queue_channel(rx_queue)->n_rx_nodesc_trunc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669)
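/* Examine the error bits of a completed RX event, bump the matching error
 * counters, and return any extra EFX_RX_PKT_* flags to apply:
 * EFX_RX_PKT_DISCARD for CRC errors (unless NETIF_F_RXALL is set),
 * otherwise 0, in which case the packet is passed up without checksum
 * validation.
 */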
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) static u16 efx_ef10_handle_rx_event_errors(struct efx_channel *channel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) unsigned int n_packets,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) unsigned int rx_encap_hdr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) unsigned int rx_l3_class,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) unsigned int rx_l4_class,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) const efx_qword_t *event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) struct efx_nic *efx = channel->efx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) bool handled = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_ECRC_ERR)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) if (!(efx->net_dev->features & NETIF_F_RXALL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) if (!efx->loopback_selftest)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) channel->n_rx_eth_crc_err += n_packets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) return EFX_RX_PKT_DISCARD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) handled = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_IPCKSUM_ERR)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) if (unlikely(rx_encap_hdr != ESE_EZ_ENCAP_HDR_VXLAN &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) rx_l3_class != ESE_DZ_L3_CLASS_IP4 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) rx_l3_class != ESE_DZ_L3_CLASS_IP4_FRAG &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) rx_l3_class != ESE_DZ_L3_CLASS_IP6 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) rx_l3_class != ESE_DZ_L3_CLASS_IP6_FRAG))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) netdev_WARN(efx->net_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) "invalid class for RX_IPCKSUM_ERR: event="
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) EFX_QWORD_FMT "\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) EFX_QWORD_VAL(*event));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) if (!efx->loopback_selftest)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) *(rx_encap_hdr ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) &channel->n_rx_outer_ip_hdr_chksum_err :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) &channel->n_rx_ip_hdr_chksum_err) += n_packets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_TCPUDP_CKSUM_ERR)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) if (unlikely(rx_encap_hdr != ESE_EZ_ENCAP_HDR_VXLAN &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) ((rx_l3_class != ESE_DZ_L3_CLASS_IP4 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) rx_l3_class != ESE_DZ_L3_CLASS_IP6) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) (rx_l4_class != ESE_FZ_L4_CLASS_TCP &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) rx_l4_class != ESE_FZ_L4_CLASS_UDP))))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) netdev_WARN(efx->net_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) "invalid class for RX_TCPUDP_CKSUM_ERR: event="
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) EFX_QWORD_FMT "\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) EFX_QWORD_VAL(*event));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) if (!efx->loopback_selftest)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) *(rx_encap_hdr ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) &channel->n_rx_outer_tcp_udp_chksum_err :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) &channel->n_rx_tcp_udp_chksum_err) += n_packets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) if (EFX_QWORD_FIELD(*event, ESF_EZ_RX_IP_INNER_CHKSUM_ERR)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) if (unlikely(!rx_encap_hdr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) netdev_WARN(efx->net_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) "invalid encapsulation type for RX_IP_INNER_CHKSUM_ERR: event="
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) EFX_QWORD_FMT "\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) EFX_QWORD_VAL(*event));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) else if (unlikely(rx_l3_class != ESE_DZ_L3_CLASS_IP4 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) rx_l3_class != ESE_DZ_L3_CLASS_IP4_FRAG &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) rx_l3_class != ESE_DZ_L3_CLASS_IP6 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) rx_l3_class != ESE_DZ_L3_CLASS_IP6_FRAG))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) netdev_WARN(efx->net_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) "invalid class for RX_IP_INNER_CHKSUM_ERR: event="
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) EFX_QWORD_FMT "\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) EFX_QWORD_VAL(*event));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) if (!efx->loopback_selftest)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) channel->n_rx_inner_ip_hdr_chksum_err += n_packets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) if (EFX_QWORD_FIELD(*event, ESF_EZ_RX_TCP_UDP_INNER_CHKSUM_ERR)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) if (unlikely(!rx_encap_hdr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) netdev_WARN(efx->net_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) "invalid encapsulation type for RX_TCP_UDP_INNER_CHKSUM_ERR: event="
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) EFX_QWORD_FMT "\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) EFX_QWORD_VAL(*event));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) else if (unlikely((rx_l3_class != ESE_DZ_L3_CLASS_IP4 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) rx_l3_class != ESE_DZ_L3_CLASS_IP6) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) (rx_l4_class != ESE_FZ_L4_CLASS_TCP &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) rx_l4_class != ESE_FZ_L4_CLASS_UDP)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) netdev_WARN(efx->net_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) "invalid class for RX_TCP_UDP_INNER_CHKSUM_ERR: event="
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) EFX_QWORD_FMT "\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) EFX_QWORD_VAL(*event));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) if (!efx->loopback_selftest)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) channel->n_rx_inner_tcp_udp_chksum_err += n_packets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) WARN_ON(!handled); /* No error bits were recognised */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) static int efx_ef10_handle_rx_event(struct efx_channel *channel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) const efx_qword_t *event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) unsigned int rx_bytes, next_ptr_lbits, rx_queue_label;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) unsigned int rx_l3_class, rx_l4_class, rx_encap_hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) unsigned int n_descs, n_packets, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) struct efx_nic *efx = channel->efx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) struct efx_ef10_nic_data *nic_data = efx->nic_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) struct efx_rx_queue *rx_queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) efx_qword_t errors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) bool rx_cont;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) u16 flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) if (unlikely(READ_ONCE(efx->reset_pending)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) /* Basic packet information */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) rx_bytes = EFX_QWORD_FIELD(*event, ESF_DZ_RX_BYTES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) next_ptr_lbits = EFX_QWORD_FIELD(*event, ESF_DZ_RX_DSC_PTR_LBITS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) rx_queue_label = EFX_QWORD_FIELD(*event, ESF_DZ_RX_QLABEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) rx_l3_class = EFX_QWORD_FIELD(*event, ESF_DZ_RX_L3_CLASS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) rx_l4_class = EFX_QWORD_FIELD(*event, ESF_FZ_RX_L4_CLASS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) rx_cont = EFX_QWORD_FIELD(*event, ESF_DZ_RX_CONT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) rx_encap_hdr =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) nic_data->datapath_caps &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) EFX_QWORD_FIELD(*event, ESF_EZ_RX_ENCAP_HDR) :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) ESE_EZ_ENCAP_HDR_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_DROP_EVENT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) netdev_WARN(efx->net_dev, "saw RX_DROP_EVENT: event="
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) EFX_QWORD_FMT "\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) EFX_QWORD_VAL(*event));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) rx_queue = efx_channel_get_rx_queue(channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) if (unlikely(rx_queue_label != efx_rx_queue_index(rx_queue)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) efx_ef10_handle_rx_wrong_queue(rx_queue, rx_queue_label);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799)
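/* The event carries only the low ESF_DZ_RX_DSC_PTR_LBITS_WIDTH bits of
 * the descriptor pointer, so count completed descriptors modulo 2^WIDTH.
 */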
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) n_descs = ((next_ptr_lbits - rx_queue->removed_count) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) ((1 << ESF_DZ_RX_DSC_PTR_LBITS_WIDTH) - 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) if (n_descs != rx_queue->scatter_n + 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) /* detect rx abort */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) if (unlikely(n_descs == rx_queue->scatter_n)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) if (rx_queue->scatter_n == 0 || rx_bytes != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) netdev_WARN(efx->net_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) "invalid RX abort: scatter_n=%u event="
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) EFX_QWORD_FMT "\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) rx_queue->scatter_n,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) EFX_QWORD_VAL(*event));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) efx_ef10_handle_rx_abort(rx_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) /* Check that RX completion merging is valid, i.e.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) * the current firmware supports it and this is a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) * non-scattered packet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) if (!(nic_data->datapath_caps &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) rx_queue->scatter_n != 0 || rx_cont) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) efx_ef10_handle_rx_bad_lbits(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) rx_queue, next_ptr_lbits,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) (rx_queue->removed_count +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) rx_queue->scatter_n + 1) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) ((1 << ESF_DZ_RX_DSC_PTR_LBITS_WIDTH) - 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) /* Merged completion for multiple non-scattered packets */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) rx_queue->scatter_n = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) rx_queue->scatter_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) n_packets = n_descs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) ++channel->n_rx_merge_events;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) channel->n_rx_merge_packets += n_packets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) flags |= EFX_RX_PKT_PREFIX_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) ++rx_queue->scatter_n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) rx_queue->scatter_len += rx_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) if (rx_cont)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) n_packets = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) EFX_POPULATE_QWORD_5(errors, ESF_DZ_RX_ECRC_ERR, 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) ESF_DZ_RX_IPCKSUM_ERR, 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) ESF_DZ_RX_TCPUDP_CKSUM_ERR, 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) ESF_EZ_RX_IP_INNER_CHKSUM_ERR, 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) ESF_EZ_RX_TCP_UDP_INNER_CHKSUM_ERR, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) EFX_AND_QWORD(errors, *event, errors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) if (unlikely(!EFX_QWORD_IS_ZERO(errors))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) flags |= efx_ef10_handle_rx_event_errors(channel, n_packets,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) rx_encap_hdr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) rx_l3_class, rx_l4_class,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) bool tcpudp = rx_l4_class == ESE_FZ_L4_CLASS_TCP ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) rx_l4_class == ESE_FZ_L4_CLASS_UDP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) switch (rx_encap_hdr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) case ESE_EZ_ENCAP_HDR_VXLAN: /* VxLAN or GENEVE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) flags |= EFX_RX_PKT_CSUMMED; /* outer UDP csum */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) if (tcpudp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) flags |= EFX_RX_PKT_CSUM_LEVEL; /* inner L4 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) case ESE_EZ_ENCAP_HDR_GRE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) case ESE_EZ_ENCAP_HDR_NONE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) if (tcpudp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) flags |= EFX_RX_PKT_CSUMMED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) netdev_WARN(efx->net_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) "unknown encapsulation type: event="
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) EFX_QWORD_FMT "\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) EFX_QWORD_VAL(*event));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) if (rx_l4_class == ESE_FZ_L4_CLASS_TCP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) flags |= EFX_RX_PKT_TCP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) channel->irq_mod_score += 2 * n_packets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) /* Handle received packet(s) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) for (i = 0; i < n_packets; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) efx_rx_packet(rx_queue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) rx_queue->removed_count & rx_queue->ptr_mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) rx_queue->scatter_n, rx_queue->scatter_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) rx_queue->removed_count += rx_queue->scatter_n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) rx_queue->scatter_n = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) rx_queue->scatter_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) return n_packets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901)
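/* Extract one 32-bit half of a TX timestamp from an event; each half is
 * itself split across two 16-bit fields (see the format description in
 * efx_ef10_handle_tx_event() below).
 */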
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) static u32 efx_ef10_extract_event_ts(efx_qword_t *event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) u32 tstamp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) tstamp = EFX_QWORD_FIELD(*event, TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) tstamp <<= 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) tstamp |= EFX_QWORD_FIELD(*event, TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) return tstamp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) struct efx_nic *efx = channel->efx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) struct efx_tx_queue *tx_queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) unsigned int tx_ev_desc_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) unsigned int tx_ev_q_label;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) unsigned int tx_ev_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) u64 ts_part;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) if (unlikely(READ_ONCE(efx->reset_pending)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_TX_DROP_EVENT)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929) /* Get the transmit queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) tx_ev_q_label = EFX_QWORD_FIELD(*event, ESF_DZ_TX_QLABEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) tx_queue = channel->tx_queue + (tx_ev_q_label % EFX_MAX_TXQ_PER_CHANNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) if (!tx_queue->timestamping) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) /* Transmit completion */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, ESF_DZ_TX_DESCR_INDX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) efx_xmit_done(tx_queue, tx_ev_desc_ptr & tx_queue->ptr_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) /* Transmit timestamps are only available for 8XXX series. They result
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) * in up to three events per packet. These occur in order, and are:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942) * - the normal completion event (may be omitted)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) * - the low part of the timestamp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) * - the high part of the timestamp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946) * It's possible for multiple completion events to appear before the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947) * corresponding timestamps. So we can for example get:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) * COMP N
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) * COMP N+1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950) * TS_LO N
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) * TS_HI N
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) * TS_LO N+1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953) * TS_HI N+1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) * In addition it's also possible for the adjacent completions to be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) * merged, so we may not see COMP N above. As such, the completion
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) * events are not very useful here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) * Each part of the timestamp is itself split across two 16 bit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) * fields in the event.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962) tx_ev_type = EFX_QWORD_FIELD(*event, ESF_EZ_TX_SOFT1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964) switch (tx_ev_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) case TX_TIMESTAMP_EVENT_TX_EV_COMPLETION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) /* Ignore this event - see above. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) case TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_LO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) ts_part = efx_ef10_extract_event_ts(event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971) tx_queue->completed_timestamp_minor = ts_part;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) case TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_HI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975) ts_part = efx_ef10_extract_event_ts(event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) tx_queue->completed_timestamp_major = ts_part;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) efx_xmit_done_single(tx_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982) netif_err(efx, hw, efx->net_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) "channel %d unknown tx event type %d (data "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) EFX_QWORD_FMT ")\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) channel->channel, tx_ev_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) EFX_QWORD_VAL(*event));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992) efx_ef10_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994) struct efx_nic *efx = channel->efx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) int subcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997) subcode = EFX_QWORD_FIELD(*event, ESF_DZ_DRV_SUB_CODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999) switch (subcode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000) case ESE_DZ_DRV_TIMER_EV:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001) case ESE_DZ_DRV_WAKE_UP_EV:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003) case ESE_DZ_DRV_START_UP_EV:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004) /* event queue init complete. ok. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007) netif_err(efx, hw, efx->net_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008) "channel %d unknown driver event type %d"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009) " (data " EFX_QWORD_FMT ")\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010) channel->channel, subcode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011) EFX_QWORD_VAL(*event));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016) static void efx_ef10_handle_driver_generated_event(struct efx_channel *channel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017) efx_qword_t *event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019) struct efx_nic *efx = channel->efx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020) u32 subcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022) subcode = EFX_QWORD_FIELD(*event, EFX_DWORD_0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024) switch (subcode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025) case EFX_EF10_TEST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026) channel->event_test_cpu = raw_smp_processor_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028) case EFX_EF10_REFILL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029) /* The queue must be empty, so we won't receive any RX
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030) * events and efx_process_channel() won't refill the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031) * queue. Refill it here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033) efx_fast_push_rx_descriptors(&channel->rx_queue, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036) netif_err(efx, hw, efx->net_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037) "channel %d unknown driver event type %u"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038) " (data " EFX_QWORD_FMT ")\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039) channel->channel, (unsigned) subcode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040) EFX_QWORD_VAL(*event));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043)
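/* Event-queue processing.  Each entry is copied out of the ring and then
 * overwritten with all-ones (EFX_SET_QWORD) so that efx_event_present()
 * treats the slot as empty on the next pass.  Only RX and driver events
 * count towards the caller's quota, as those are the types that can
 * generate an unbounded amount of follow-up work.
 */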
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044) static int efx_ef10_ev_process(struct efx_channel *channel, int quota)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046) struct efx_nic *efx = channel->efx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047) efx_qword_t event, *p_event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048) unsigned int read_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049) int ev_code;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050) int spent = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052) if (quota <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053) return spent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055) read_ptr = channel->eventq_read_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057) for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058) p_event = efx_event(channel, read_ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059) event = *p_event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061) if (!efx_event_present(&event))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064) EFX_SET_QWORD(*p_event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066) ++read_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068) ev_code = EFX_QWORD_FIELD(event, ESF_DZ_EV_CODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070) netif_vdbg(efx, drv, efx->net_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071) "processing event on %d " EFX_QWORD_FMT "\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072) channel->channel, EFX_QWORD_VAL(event));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074) switch (ev_code) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075) case ESE_DZ_EV_CODE_MCDI_EV:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076) efx_mcdi_process_event(channel, &event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078) case ESE_DZ_EV_CODE_RX_EV:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079) spent += efx_ef10_handle_rx_event(channel, &event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080) if (spent >= quota) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081) /* XXX can we split a merged event to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082) * avoid going over-quota?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084) spent = quota;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088) case ESE_DZ_EV_CODE_TX_EV:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089) efx_ef10_handle_tx_event(channel, &event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091) case ESE_DZ_EV_CODE_DRIVER_EV:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092) efx_ef10_handle_driver_event(channel, &event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093) if (++spent == quota)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096) case EFX_EF10_DRVGEN_EV:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097) efx_ef10_handle_driver_generated_event(channel, &event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100) netif_err(efx, hw, efx->net_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101) "channel %d unknown event type %d (data " EFX_QWORD_FMT ")\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103) channel->channel, ev_code,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104) EFX_QWORD_VAL(event));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109) channel->eventq_read_ptr = read_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110) return spent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112)
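/* Acknowledge processed events by writing the updated read pointer back
 * to hardware.  Under workaround 35388 (a hardware erratum on some EF10
 * NICs) the pointer cannot be written in one access: its high and low
 * parts go through the indirect ER_DD_EVQ_INDIRECT register in two
 * writes.  Otherwise a single dword write to ER_DZ_EVQ_RPTR suffices.
 */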
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113) static void efx_ef10_ev_read_ack(struct efx_channel *channel)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115) struct efx_nic *efx = channel->efx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116) efx_dword_t rptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118) if (EFX_EF10_WORKAROUND_35388(efx)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119) BUILD_BUG_ON(EFX_MIN_EVQ_SIZE <
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120) (1 << ERF_DD_EVQ_IND_RPTR_WIDTH));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121) BUILD_BUG_ON(EFX_MAX_EVQ_SIZE >
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122) (1 << 2 * ERF_DD_EVQ_IND_RPTR_WIDTH));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124) EFX_POPULATE_DWORD_2(rptr, ERF_DD_EVQ_IND_RPTR_FLAGS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125) EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126) ERF_DD_EVQ_IND_RPTR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3127) (channel->eventq_read_ptr &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128) channel->eventq_mask) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129) ERF_DD_EVQ_IND_RPTR_WIDTH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130) efx_writed_page(efx, &rptr, ER_DD_EVQ_INDIRECT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131) channel->channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132) EFX_POPULATE_DWORD_2(rptr, ERF_DD_EVQ_IND_RPTR_FLAGS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3133) EFE_DD_EVQ_IND_RPTR_FLAGS_LOW,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3134) ERF_DD_EVQ_IND_RPTR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135) channel->eventq_read_ptr &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136) ((1 << ERF_DD_EVQ_IND_RPTR_WIDTH) - 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137) efx_writed_page(efx, &rptr, ER_DD_EVQ_INDIRECT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138) channel->channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140) EFX_POPULATE_DWORD_1(rptr, ERF_DZ_EVQ_RPTR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141) channel->eventq_read_ptr &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142) channel->eventq_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143) efx_writed_page(efx, &rptr, ER_DZ_EVQ_RPTR, channel->channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146)
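/* Inject a test event into this channel's event queue.  The MC copies
 * the 64-bit payload built here verbatim into the queue, where it shows
 * up as a driver-generated event and is caught by the EFX_EF10_TEST case
 * above, which records the handling CPU so the self-test can verify
 * delivery.
 */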
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147) static void efx_ef10_ev_test_generate(struct efx_channel *channel)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149) MCDI_DECLARE_BUF(inbuf, MC_CMD_DRIVER_EVENT_IN_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150) struct efx_nic *efx = channel->efx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151) efx_qword_t event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154) EFX_POPULATE_QWORD_2(event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155) ESF_DZ_EV_CODE, EFX_EF10_DRVGEN_EV,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156) ESF_DZ_EV_DATA, EFX_EF10_TEST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158) MCDI_SET_DWORD(inbuf, DRIVER_EVENT_IN_EVQ, channel->channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160) /* MCDI_SET_QWORD is not appropriate here since EFX_POPULATE_* has
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161) * already swapped the data to little-endian order.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163) memcpy(MCDI_PTR(inbuf, DRIVER_EVENT_IN_DATA), &event.u64[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164) sizeof(efx_qword_t));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166) rc = efx_mcdi_rpc(efx, MC_CMD_DRIVER_EVENT, inbuf, sizeof(inbuf),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3167) NULL, 0, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168) if (rc != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174) WARN_ON(true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175) netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178) static void efx_ef10_prepare_flr(struct efx_nic *efx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3180) atomic_set(&efx->active_queues, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182)
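/* Fallback path for changing the MAC address when the firmware lacks
 * MC_CMD_VADAPTOR_SET_MAC: tear down the filter table and vadaptor, swap
 * the MAC on the PF-created vport itself, then rebuild.  On success we
 * deliberately fall through the restore_* labels below; on failure those
 * labels rebuild whatever was torn down, and if that also fails we
 * schedule a datapath reset rather than leave the NIC half-configured.
 */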
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3183) static int efx_ef10_vport_set_mac_address(struct efx_nic *efx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185) struct efx_ef10_nic_data *nic_data = efx->nic_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186) u8 mac_old[ETH_ALEN];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3187) int rc, rc2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3189) /* Only reconfigure a PF-created vport */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3190) if (is_zero_ether_addr(nic_data->vport_mac))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3193) efx_device_detach_sync(efx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3194) efx_net_stop(efx->net_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3195) down_write(&efx->filter_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3196) efx_mcdi_filter_table_remove(efx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3197) up_write(&efx->filter_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3198)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3199) rc = efx_ef10_vadaptor_free(efx, efx->vport_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3200) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3201) goto restore_filters;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3203) ether_addr_copy(mac_old, nic_data->vport_mac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3204) rc = efx_ef10_vport_del_mac(efx, efx->vport_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3205) nic_data->vport_mac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3206) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3207) goto restore_vadaptor;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3209) rc = efx_ef10_vport_add_mac(efx, efx->vport_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3210) efx->net_dev->dev_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3211) if (!rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3212) ether_addr_copy(nic_data->vport_mac, efx->net_dev->dev_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3213) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3214) rc2 = efx_ef10_vport_add_mac(efx, efx->vport_id, mac_old);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3215) if (rc2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3216) /* Failed to add original MAC, so clear vport_mac */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3217) eth_zero_addr(nic_data->vport_mac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3218) goto reset_nic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3219) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3220) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3221)
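/* Fall through here on success as well as via the error gotos above */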
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3222) restore_vadaptor:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3223) rc2 = efx_ef10_vadaptor_alloc(efx, efx->vport_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3224) if (rc2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3225) goto reset_nic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3226) restore_filters:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3227) down_write(&efx->filter_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3228) rc2 = efx_ef10_filter_table_probe(efx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3229) up_write(&efx->filter_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230) if (rc2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3231) goto reset_nic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3232)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3233) rc2 = efx_net_open(efx->net_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3234) if (rc2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3235) goto reset_nic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3237) efx_device_attach_if_not_resetting(efx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3239) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3241) reset_nic:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3242) netif_err(efx, drv, efx->net_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3243) "Failed to restore when changing MAC address - scheduling reset\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3244) efx_schedule_reset(efx, RESET_TYPE_DATAPATH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3246) return rc ? rc : rc2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3247) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3248)
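/* Change the MAC address via MC_CMD_VADAPTOR_SET_MAC.  The filter table
 * is removed around the call because its entries reference the old
 * address.  Two fallbacks exist: a VF refused with -EPERM asks its
 * parent PF (found through pci_dev->physfn) to make the change on its
 * behalf, and a PF whose firmware lacks this command uses the vport
 * method above.
 */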
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3249) static int efx_ef10_set_mac_address(struct efx_nic *efx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3250) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3251) MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_SET_MAC_IN_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3252) bool was_enabled = efx->port_enabled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3253) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3255) efx_device_detach_sync(efx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3256) efx_net_stop(efx->net_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3258) mutex_lock(&efx->mac_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3259) down_write(&efx->filter_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3260) efx_mcdi_filter_table_remove(efx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3261)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3262) ether_addr_copy(MCDI_PTR(inbuf, VADAPTOR_SET_MAC_IN_MACADDR),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3263) efx->net_dev->dev_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3264) MCDI_SET_DWORD(inbuf, VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3265) efx->vport_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3266) rc = efx_mcdi_rpc_quiet(efx, MC_CMD_VADAPTOR_SET_MAC, inbuf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3267) sizeof(inbuf), NULL, 0, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3268)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3269) efx_ef10_filter_table_probe(efx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3270) up_write(&efx->filter_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3271) mutex_unlock(&efx->mac_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3272)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3273) if (was_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3274) efx_net_open(efx->net_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3275) efx_device_attach_if_not_resetting(efx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3276)
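/* Note the bracketing below: with CONFIG_SFC_SRIOV the "if (rc == -EPERM)"
 * following the #endif is the else-branch of the VF test above; without
 * SRIOV it is evaluated unconditionally.
 */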
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3277) #ifdef CONFIG_SFC_SRIOV
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3278) if (efx->pci_dev->is_virtfn && efx->pci_dev->physfn) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3279) struct efx_ef10_nic_data *nic_data = efx->nic_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3280) struct pci_dev *pci_dev_pf = efx->pci_dev->physfn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3282) if (rc == -EPERM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3283) struct efx_nic *efx_pf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3284)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3285) /* Switch to PF and change MAC address on vport */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3286) efx_pf = pci_get_drvdata(pci_dev_pf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3288) rc = efx_ef10_sriov_set_vf_mac(efx_pf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3289) nic_data->vf_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3290) efx->net_dev->dev_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3291) } else if (!rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3292) struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3293) struct efx_ef10_nic_data *nic_data = efx_pf->nic_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3294) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3296) /* MAC address successfully changed by VF (with MAC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3297) * spoofing), so update the parent PF if possible.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3298) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3299) for (i = 0; i < efx_pf->vf_count; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3300) struct ef10_vf *vf = nic_data->vf + i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3302) if (vf->efx == efx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3303) ether_addr_copy(vf->mac,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3304) efx->net_dev->dev_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3305) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3306) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3307) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3308) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3309) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3310) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3311) if (rc == -EPERM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3312) netif_err(efx, drv, efx->net_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3313) "Cannot change MAC address; use sfboot to enable mac-spoofing on this interface\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3315) } else if (rc == -ENOSYS && !efx_ef10_is_vf(efx)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3316) /* If the active MCFW does not support MC_CMD_VADAPTOR_SET_MAC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3317) * fall back to changing the MAC address on the vport. This
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3318) * only applies to PFs because such versions of MCFW do not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3319) * support VFs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3320) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3321) rc = efx_ef10_vport_set_mac_address(efx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3322) } else if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3323) efx_mcdi_display_error(efx, MC_CMD_VADAPTOR_SET_MAC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3324) sizeof(inbuf), NULL, 0, rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3325) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3327) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3328) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3329)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3330) static int efx_ef10_mac_reconfigure(struct efx_nic *efx, bool mtu_only)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3331) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3332) WARN_ON(!mutex_is_locked(&efx->mac_lock));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3333)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3334) efx_mcdi_filter_sync_rx_mode(efx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3335)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3336) if (mtu_only && efx_has_cap(efx, SET_MAC_ENHANCED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3337) return efx_mcdi_set_mtu(efx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3338) return efx_mcdi_set_mac(efx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3339) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3340)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3341) static int efx_ef10_start_bist(struct efx_nic *efx, u32 bist_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3342) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3343) MCDI_DECLARE_BUF(inbuf, MC_CMD_START_BIST_IN_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3344)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3345) MCDI_SET_DWORD(inbuf, START_BIST_IN_TYPE, bist_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3346) return efx_mcdi_rpc(efx, MC_CMD_START_BIST, inbuf, sizeof(inbuf),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3347) NULL, 0, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3348) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3349)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3350) /* MC BISTs follow a different poll mechanism from PHY BISTs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3351) * The BIST is done in the poll handler on the MC, and the MCDI command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3352) * will block until the BIST is done.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3353) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3354) static int efx_ef10_poll_bist(struct efx_nic *efx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3355) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3356) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3357) MCDI_DECLARE_BUF(outbuf, MC_CMD_POLL_BIST_OUT_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3358) size_t outlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3359) u32 result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3360)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3361) rc = efx_mcdi_rpc(efx, MC_CMD_POLL_BIST, NULL, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3362) outbuf, sizeof(outbuf), &outlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3363) if (rc != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3364) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3365)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3366) if (outlen < MC_CMD_POLL_BIST_OUT_LEN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3367) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3368)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3369) result = MCDI_DWORD(outbuf, POLL_BIST_OUT_RESULT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3370) switch (result) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3371) case MC_CMD_POLL_BIST_PASSED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3372) netif_dbg(efx, hw, efx->net_dev, "BIST passed.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3373) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3374) case MC_CMD_POLL_BIST_TIMEOUT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3375) netif_err(efx, hw, efx->net_dev, "BIST timed out\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3376) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3377) case MC_CMD_POLL_BIST_FAILED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3378) netif_err(efx, hw, efx->net_dev, "BIST failed.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3379) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3380) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3381) netif_err(efx, hw, efx->net_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3382) "BIST returned unknown result %u\n", result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3383) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3384) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3385) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3386)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3387) static int efx_ef10_run_bist(struct efx_nic *efx, u32 bist_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3388) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3389) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3390)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3391) netif_dbg(efx, drv, efx->net_dev, "starting BIST type %u\n", bist_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3392)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3393) rc = efx_ef10_start_bist(efx, bist_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3394) if (rc != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3395) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3396)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3397) return efx_ef10_poll_bist(efx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3398) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3399)
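/* Offline chip self-test.  The datapath is taken down around the BISTs
 * with a WORLD reset, as they cannot run alongside normal traffic.
 * -EPERM is not treated as a failure: an unprivileged function is simply
 * not allowed to run these tests.
 */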
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3400) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3401) efx_ef10_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3402) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3403) int rc, rc2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3404)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3405) efx_reset_down(efx, RESET_TYPE_WORLD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3406)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3407) rc = efx_mcdi_rpc(efx, MC_CMD_ENABLE_OFFLINE_BIST,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3408) NULL, 0, NULL, 0, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3409) if (rc != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3410) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3411)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3412) tests->memory = efx_ef10_run_bist(efx, MC_CMD_MC_MEM_BIST) ? -1 : 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3413) tests->registers = efx_ef10_run_bist(efx, MC_CMD_REG_BIST) ? -1 : 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3414)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3415) rc = efx_mcdi_reset(efx, RESET_TYPE_WORLD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3416)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3417) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3418) if (rc == -EPERM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3419) rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3420) rc2 = efx_reset_up(efx, RESET_TYPE_WORLD, rc == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3421) return rc ? rc : rc2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3422) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3423)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3424) #ifdef CONFIG_SFC_MTD
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3425)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3426) struct efx_ef10_nvram_type_info {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3427) u16 type, type_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3428) u8 port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3429) const char *name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3430) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3431)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3432) static const struct efx_ef10_nvram_type_info efx_ef10_nvram_types[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3433) { NVRAM_PARTITION_TYPE_MC_FIRMWARE, 0, 0, "sfc_mcfw" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3434) { NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 0, 0, "sfc_mcfw_backup" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3435) { NVRAM_PARTITION_TYPE_EXPANSION_ROM, 0, 0, "sfc_exp_rom" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3436) { NVRAM_PARTITION_TYPE_STATIC_CONFIG, 0, 0, "sfc_static_cfg" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3437) { NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, 0, 0, "sfc_dynamic_cfg" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3438) { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0, 0, 0, "sfc_exp_rom_cfg" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3439) { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT1, 0, 1, "sfc_exp_rom_cfg" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3440) { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT2, 0, 2, "sfc_exp_rom_cfg" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3441) { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3, 0, 3, "sfc_exp_rom_cfg" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3442) { NVRAM_PARTITION_TYPE_LICENSE, 0, 0, "sfc_license" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3443) { NVRAM_PARTITION_TYPE_PHY_MIN, 0xff, 0, "sfc_phy_fw" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3444) { NVRAM_PARTITION_TYPE_MUM_FIRMWARE, 0, 0, "sfc_mumfw" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3445) { NVRAM_PARTITION_TYPE_EXPANSION_UEFI, 0, 0, "sfc_uefi" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3446) { NVRAM_PARTITION_TYPE_DYNCONFIG_DEFAULTS, 0, 0, "sfc_dynamic_cfg_dflt" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3447) { NVRAM_PARTITION_TYPE_ROMCONFIG_DEFAULTS, 0, 0, "sfc_exp_rom_cfg_dflt" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3448) { NVRAM_PARTITION_TYPE_STATUS, 0, 0, "sfc_status" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3449) { NVRAM_PARTITION_TYPE_BUNDLE, 0, 0, "sfc_bundle" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3450) { NVRAM_PARTITION_TYPE_BUNDLE_METADATA, 0, 0, "sfc_bundle_metadata" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3451) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3452) #define EF10_NVRAM_PARTITION_COUNT ARRAY_SIZE(efx_ef10_nvram_types)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3453)
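/* A partition matches an entry above when (type & ~type_mask) == type,
 * so a non-zero type_mask covers a whole range of types; e.g. the
 * sfc_phy_fw entry with mask 0xff matches all the PHY firmware partition
 * types.  Entries sharing a name differ only in port, and
 * efx_ef10_mtd_probe_partition() below exposes just the one matching
 * this function's port.
 */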
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3454) static int efx_ef10_mtd_probe_partition(struct efx_nic *efx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3455) struct efx_mcdi_mtd_partition *part,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3456) unsigned int type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3457) unsigned long *found)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3458) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3459) MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_METADATA_IN_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3460) MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_METADATA_OUT_LENMAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3461) const struct efx_ef10_nvram_type_info *info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3462) size_t size, erase_size, outlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3463) int type_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3464) bool protected;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3465) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3466)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3467) for (type_idx = 0; ; type_idx++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3468) if (type_idx == EF10_NVRAM_PARTITION_COUNT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3469) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3470) info = efx_ef10_nvram_types + type_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3471) if ((type & ~info->type_mask) == info->type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3472) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3473) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3474) if (info->port != efx_port_num(efx))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3475) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3476)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3477) rc = efx_mcdi_nvram_info(efx, type, &size, &erase_size, &protected);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3478) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3479) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3480) if (protected &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3481) (type != NVRAM_PARTITION_TYPE_DYNCONFIG_DEFAULTS &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3482) type != NVRAM_PARTITION_TYPE_ROMCONFIG_DEFAULTS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3483) /* Hide protected partitions that don't provide defaults. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3484) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3485)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3486) if (protected)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3487) /* Protected partitions are read only. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3488) erase_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3489)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3490) /* If we've already exposed a partition of this type, hide this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3491) * duplicate. All operations on MTDs are keyed by the type anyway,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3492) * so we can't act on the duplicate.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3493) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3494) if (__test_and_set_bit(type_idx, found))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3495) return -EEXIST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3496)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3497) part->nvram_type = type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3499) MCDI_SET_DWORD(inbuf, NVRAM_METADATA_IN_TYPE, type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3500) rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_METADATA, inbuf, sizeof(inbuf),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3501) outbuf, sizeof(outbuf), &outlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3502) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3503) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3504) if (outlen < MC_CMD_NVRAM_METADATA_OUT_LENMIN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3505) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3506) if (MCDI_DWORD(outbuf, NVRAM_METADATA_OUT_FLAGS) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3507) (1 << MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_LBN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3508) part->fw_subtype = MCDI_DWORD(outbuf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3509) NVRAM_METADATA_OUT_SUBTYPE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3510)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3511) part->common.dev_type_name = "EF10 NVRAM manager";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3512) part->common.type_name = info->name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3514) part->common.mtd.type = MTD_NORFLASH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3515) part->common.mtd.flags = MTD_CAP_NORFLASH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3516) part->common.mtd.size = size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3517) part->common.mtd.erasesize = erase_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3518) /* A zero erase size means the partition (e.g. sfc_status) is read-only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3519) if (!erase_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3520) part->common.mtd.flags |= MTD_NO_ERASE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3521)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3522) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3523) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3524)
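/* Enumerate the NVRAM partitions reported by the MC and register an MTD
 * for each one we recognise.  -EEXIST (duplicate type) and -ENODEV
 * (unknown type, wrong port or hidden partition) are deliberately
 * skipped rather than treated as probe failures.
 */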
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3525) static int efx_ef10_mtd_probe(struct efx_nic *efx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3526) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3527) MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3528) DECLARE_BITMAP(found, EF10_NVRAM_PARTITION_COUNT) = { 0 };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3529) struct efx_mcdi_mtd_partition *parts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3530) size_t outlen, n_parts_total, i, n_parts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3531) unsigned int type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3532) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3533)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3534) ASSERT_RTNL();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3535)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3536) BUILD_BUG_ON(MC_CMD_NVRAM_PARTITIONS_IN_LEN != 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3537) rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_PARTITIONS, NULL, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3538) outbuf, sizeof(outbuf), &outlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3539) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3540) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3541) if (outlen < MC_CMD_NVRAM_PARTITIONS_OUT_LENMIN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3542) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3543)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3544) n_parts_total = MCDI_DWORD(outbuf, NVRAM_PARTITIONS_OUT_NUM_PARTITIONS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3545) if (n_parts_total >
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3546) MCDI_VAR_ARRAY_LEN(outlen, NVRAM_PARTITIONS_OUT_TYPE_ID))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3547) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3548)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3549) parts = kcalloc(n_parts_total, sizeof(*parts), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3550) if (!parts)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3551) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3552)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3553) n_parts = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3554) for (i = 0; i < n_parts_total; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3555) type = MCDI_ARRAY_DWORD(outbuf, NVRAM_PARTITIONS_OUT_TYPE_ID,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3556) i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3557) rc = efx_ef10_mtd_probe_partition(efx, &parts[n_parts], type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3558) found);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3559) if (rc == -EEXIST || rc == -ENODEV)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3560) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3561) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3562) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3563) n_parts++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3564) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3565)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3566) rc = efx_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3567) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3568) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3569) kfree(parts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3570) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3571) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3572)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3573) #endif /* CONFIG_SFC_MTD */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3574)
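/* PTP host time is handed to the MC by writing it directly to the MC
 * doorbell register.  VFs have no access to that register, so their
 * variant below is a no-op.
 */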
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3575) static void efx_ef10_ptp_write_host_time(struct efx_nic *efx, u32 host_time)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3576) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3577) _efx_writed(efx, cpu_to_le32(host_time), ER_DZ_MC_DB_LWRD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3578) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3579)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3580) static void efx_ef10_ptp_write_host_time_vf(struct efx_nic *efx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3581) u32 host_time) {}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3582)
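/* PTP sync-event subscription.  channel->sync_events_state is a small
 * state machine: DISABLED and QUIESCENT both mean "off", but QUIESCENT
 * marks a temporary disable that a later temp enable may undo; REQUESTED
 * means a subscribe has been sent and VALID that sync events are
 * flowing.  The "temp" argument selects the temporary flavour of the
 * enable/disable operation.
 */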
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3583) static int efx_ef10_rx_enable_timestamping(struct efx_channel *channel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3584) bool temp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3585) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3586) MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3587) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3588)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3589) if (channel->sync_events_state == SYNC_EVENTS_REQUESTED ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3590) channel->sync_events_state == SYNC_EVENTS_VALID ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3591) (temp && channel->sync_events_state == SYNC_EVENTS_DISABLED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3592) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3593) channel->sync_events_state = SYNC_EVENTS_REQUESTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3594)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3595) MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_TIME_EVENT_SUBSCRIBE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3596) MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3597) MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3598) channel->channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3599)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3600) rc = efx_mcdi_rpc(channel->efx, MC_CMD_PTP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3601) inbuf, sizeof(inbuf), NULL, 0, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3602)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3603) if (rc != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3604) channel->sync_events_state = temp ? SYNC_EVENTS_QUIESCENT :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3605) SYNC_EVENTS_DISABLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3606)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3607) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3608) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3609)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3610) static int efx_ef10_rx_disable_timestamping(struct efx_channel *channel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3611) bool temp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3612) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3613) MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3614) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3615)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3616) if (channel->sync_events_state == SYNC_EVENTS_DISABLED ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3617) (temp && channel->sync_events_state == SYNC_EVENTS_QUIESCENT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3618) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3619) if (channel->sync_events_state == SYNC_EVENTS_QUIESCENT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3620) channel->sync_events_state = SYNC_EVENTS_DISABLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3621) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3622) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3623) channel->sync_events_state = temp ? SYNC_EVENTS_QUIESCENT :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3624) SYNC_EVENTS_DISABLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3625)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3626) MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_TIME_EVENT_UNSUBSCRIBE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3627) MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3628) MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_UNSUBSCRIBE_CONTROL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3629) MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_SINGLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3630) MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_UNSUBSCRIBE_QUEUE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3631) channel->channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3632)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3633) rc = efx_mcdi_rpc(channel->efx, MC_CMD_PTP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3634) inbuf, sizeof(inbuf), NULL, 0, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3635)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3636) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3637) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3638)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3639) static int efx_ef10_ptp_set_ts_sync_events(struct efx_nic *efx, bool en,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3640) bool temp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3641) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3642) int (*set)(struct efx_channel *channel, bool temp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3643) struct efx_channel *channel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3644)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3645) set = en ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3646) efx_ef10_rx_enable_timestamping :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3647) efx_ef10_rx_disable_timestamping;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3648)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3649) channel = efx_ptp_channel(efx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3650) if (channel) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3651) int rc = set(channel, temp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3652) if (en && rc != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3653) efx_ef10_ptp_set_ts_sync_events(efx, false, temp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3654) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3655) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3656) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3657)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3658) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3659) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3660)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3661) static int efx_ef10_ptp_set_ts_config_vf(struct efx_nic *efx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3662) struct hwtstamp_config *init)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3663) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3664) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3665) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3666)
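/* Configure hardware timestamping.  Once PTP is enabled the EF10
 * datapath timestamps every received packet, so all of the specific PTP
 * filter requests below are simply upgraded to HWTSTAMP_FILTER_ALL
 * rather than being implemented individually.
 */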
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3667) static int efx_ef10_ptp_set_ts_config(struct efx_nic *efx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3668) struct hwtstamp_config *init)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3669) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3670) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3671)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3672) switch (init->rx_filter) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3673) case HWTSTAMP_FILTER_NONE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3674) efx_ef10_ptp_set_ts_sync_events(efx, false, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3675) /* if TX timestamping is still requested then leave PTP on */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3676) return efx_ptp_change_mode(efx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3677) init->tx_type != HWTSTAMP_TX_OFF, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3678) case HWTSTAMP_FILTER_ALL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3679) case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3680) case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3681) case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3682) case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3683) case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3684) case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3685) case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3686) case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3687) case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3688) case HWTSTAMP_FILTER_PTP_V2_EVENT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3689) case HWTSTAMP_FILTER_PTP_V2_SYNC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3690) case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3691) case HWTSTAMP_FILTER_NTP_ALL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3692) init->rx_filter = HWTSTAMP_FILTER_ALL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3693) rc = efx_ptp_change_mode(efx, true, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3694) if (!rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3695) rc = efx_ef10_ptp_set_ts_sync_events(efx, true, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3696) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3697) efx_ptp_change_mode(efx, false, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3698) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3699) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3700) return -ERANGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3701) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3702) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3703)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3704) static int efx_ef10_get_phys_port_id(struct efx_nic *efx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3705) struct netdev_phys_item_id *ppid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3706) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3707) struct efx_ef10_nic_data *nic_data = efx->nic_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3708)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3709) if (!is_valid_ether_addr(nic_data->port_id))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3710) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3711)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3712) ppid->id_len = ETH_ALEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3713) memcpy(ppid->id, nic_data->port_id, ppid->id_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3714)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3715) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3716) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3717)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3718) static int efx_ef10_vlan_rx_add_vid(struct efx_nic *efx, __be16 proto, u16 vid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3719) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3720) if (proto != htons(ETH_P_8021Q))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3721) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3722)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3723) return efx_ef10_add_vlan(efx, vid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3724) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3725)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3726) static int efx_ef10_vlan_rx_kill_vid(struct efx_nic *efx, __be16 proto, u16 vid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3727) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3728) if (proto != htons(ETH_P_8021Q))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3729) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3730)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3731) return efx_ef10_del_vlan(efx, vid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3732) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3733)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3734) /* We rely on the MCDI wiping out our TX rings if it made any changes to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3735) * ports table, ensuring that any TSO descriptors that were made on a now-
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3736) * removed tunnel port will be blown away and won't break things when we try
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3737) * to transmit them using the new ports table.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3738) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3739) static int efx_ef10_set_udp_tnl_ports(struct efx_nic *efx, bool unloading)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3740) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3741) struct efx_ef10_nic_data *nic_data = efx->nic_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3742) MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LENMAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3743) MCDI_DECLARE_BUF(outbuf, MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3744) bool will_reset = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3745) size_t num_entries = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3746) size_t inlen, outlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3747) size_t i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3748) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3749) efx_dword_t flags_and_num_entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3750)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3751) WARN_ON(!mutex_is_locked(&nic_data->udp_tunnels_lock));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3752)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3753) nic_data->udp_tunnels_dirty = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3754)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3755) if (!(nic_data->datapath_caps &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3756) (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3757) efx_device_attach_if_not_resetting(efx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3758) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3759) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3760)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3761) BUILD_BUG_ON(ARRAY_SIZE(nic_data->udp_tunnels) >
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3762) MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_MAXNUM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3763)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3764) for (i = 0; i < ARRAY_SIZE(nic_data->udp_tunnels); ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3765) if (nic_data->udp_tunnels[i].type !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3766) TUNNEL_ENCAP_UDP_PORT_ENTRY_INVALID) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3767) efx_dword_t entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3768)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3769) EFX_POPULATE_DWORD_2(entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3770) TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3771) ntohs(nic_data->udp_tunnels[i].port),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3772) TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3773) nic_data->udp_tunnels[i].type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3774) *_MCDI_ARRAY_DWORD(inbuf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3775) SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3776) num_entries++) = entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3777) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3778) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3779)
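/* NUM_ENTRIES shares a dword with FLAGS, occupying its upper 16 bits
 * (EFX_WORD_1).  The BUILD_BUG_ONs below pin down that layout so that a
 * single EFX_POPULATE_DWORD_2 can fill both fields at once.
 */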
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3780) BUILD_BUG_ON((MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_NUM_ENTRIES_OFST -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3781) MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_FLAGS_OFST) * 8 !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3782) EFX_WORD_1_LBN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3783) BUILD_BUG_ON(MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_NUM_ENTRIES_LEN * 8 !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3784) EFX_WORD_1_WIDTH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3785) EFX_POPULATE_DWORD_2(flags_and_num_entries,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3786) MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_UNLOADING,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3787) !!unloading,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3788) EFX_WORD_1, num_entries);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3789) *_MCDI_DWORD(inbuf, SET_TUNNEL_ENCAP_UDP_PORTS_IN_FLAGS) =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3790) flags_and_num_entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3791)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3792) inlen = MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LEN(num_entries);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3794) rc = efx_mcdi_rpc_quiet(efx, MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3795) inbuf, inlen, outbuf, sizeof(outbuf), &outlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3796) if (rc == -EIO) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3797) /* Most likely the MC rebooted due to another function also
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3798) * setting its tunnel port list. Mark the tunnel port list as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3799) * dirty, so it will be pushed upon coming up from the reboot.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3800) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3801) nic_data->udp_tunnels_dirty = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3802) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3803) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3804)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3805) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3806) /* -EPERM is expected: unprivileged functions may not set tunnel ports */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3807) if (rc != -EPERM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3808) netif_warn(efx, drv, efx->net_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3809) "Unable to set UDP tunnel ports; rc=%d.\n", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3810) } else if (MCDI_DWORD(outbuf, SET_TUNNEL_ENCAP_UDP_PORTS_OUT_FLAGS) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3811) (1 << MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_RESETTING_LBN)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3812) netif_info(efx, drv, efx->net_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3813) "Rebooting MC due to UDP tunnel port list change\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3814) will_reset = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3815) if (unloading)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3816) 			/* Delay to let the MC reset complete.  This makes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3817) 			 * unloading other functions a bit smoother.  It is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3818) 			 * a race, but the other unload will work whichever
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3819) 			 * way it goes; the delay just avoids an unnecessary
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3820) 			 * error message.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3821) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3822) msleep(100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3823) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3824) if (!will_reset && !unloading) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3825) /* The caller will have detached, relying on the MC reset to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3826) * trigger a re-attach. Since there won't be an MC reset, we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3827) * have to do the attach ourselves.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3828) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3829) efx_device_attach_if_not_resetting(efx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3830) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3832) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3833) }
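
/* A minimal sketch of the calling convention that the helpers below follow
 * (this is not an extra API; see efx_ef10_udp_tnl_set_port() for the real
 * thing):
 *
 *	mutex_lock(&nic_data->udp_tunnels_lock);
 *	efx_device_detach_sync(efx);	(quiesce TX vs efx_features_check())
 *	nic_data->udp_tunnels[entry] = ...;	(edit the software table)
 *	rc = efx_ef10_set_udp_tnl_ports(efx, false);
 *	mutex_unlock(&nic_data->udp_tunnels_lock);
 *
 * Re-attach is handled inside efx_ef10_set_udp_tnl_ports() itself, either
 * directly or via the MC reset that the MCDI call may trigger.
 */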
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3835) static int efx_ef10_udp_tnl_push_ports(struct efx_nic *efx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3836) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3837) struct efx_ef10_nic_data *nic_data = efx->nic_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3838) int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3840) mutex_lock(&nic_data->udp_tunnels_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3841) if (nic_data->udp_tunnels_dirty) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3842) 		/* Make sure all TX queues are stopped while we modify the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3843) 		 * table, else we might race against an efx_features_check().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3844) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3845) efx_device_detach_sync(efx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3846) rc = efx_ef10_set_udp_tnl_ports(efx, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3847) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3848) mutex_unlock(&nic_data->udp_tunnels_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3849) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3850) }
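
/* This is the .udp_tnl_push_ports hook of efx_hunt_a0_nic_type below; it is
 * where the dirty flag set on the -EIO path in efx_ef10_set_udp_tnl_ports()
 * gets serviced once the MC is reachable again.
 */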
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3852) static int efx_ef10_udp_tnl_set_port(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3853) unsigned int table, unsigned int entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3854) struct udp_tunnel_info *ti)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3855) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3856) struct efx_nic *efx = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3857) struct efx_ef10_nic_data *nic_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3858) int efx_tunnel_type, rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3859)
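	/* The udp_tunnel_nic core only offers us port types we declared in
	 * efx_ef10_udp_tunnels.tables[].tunnel_types, so anything that is
	 * not VXLAN here must be GENEVE.
	 */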
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3860) if (ti->type == UDP_TUNNEL_TYPE_VXLAN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3861) efx_tunnel_type = TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3862) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3863) efx_tunnel_type = TUNNEL_ENCAP_UDP_PORT_ENTRY_GENEVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3864)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3865) nic_data = efx->nic_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3866) if (!(nic_data->datapath_caps &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3867) (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3868) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3869)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3870) mutex_lock(&nic_data->udp_tunnels_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3871) 	/* Make sure all TX queues are stopped while we add to the table,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3872) 	 * else we might race against an efx_features_check().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3873) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3874) efx_device_detach_sync(efx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3875) nic_data->udp_tunnels[entry].type = efx_tunnel_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3876) nic_data->udp_tunnels[entry].port = ti->port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3877) rc = efx_ef10_set_udp_tnl_ports(efx, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3878) mutex_unlock(&nic_data->udp_tunnels_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3879)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3880) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3881) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3883) /* Called under the TX lock with the TX queue running, hence no-one can be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3884) * in the middle of updating the UDP tunnels table. However, they could
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3885) * have tried and failed the MCDI, in which case they'll have set the dirty
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3886) * flag before dropping their locks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3887) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3888) static bool efx_ef10_udp_tnl_has_port(struct efx_nic *efx, __be16 port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3889) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3890) struct efx_ef10_nic_data *nic_data = efx->nic_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3891) size_t i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3893) if (!(nic_data->datapath_caps &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3894) (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3895) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3897) if (nic_data->udp_tunnels_dirty)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3898) /* SW table may not match HW state, so just assume we can't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3899) * use any UDP tunnel offloads.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3900) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3901) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3903) for (i = 0; i < ARRAY_SIZE(nic_data->udp_tunnels); ++i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3904) if (nic_data->udp_tunnels[i].type !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3905) TUNNEL_ENCAP_UDP_PORT_ENTRY_INVALID &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3906) nic_data->udp_tunnels[i].port == port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3907) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3909) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3910) }
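
/* A sketch of the consumer side, assuming it mirrors efx_features_check():
 * for an encapsulated skb the TX feature-check path asks whether the outer
 * UDP destination port is offloadable and strips the offload features if
 * not, along the lines of:
 *
 *	if (skb->encapsulation &&
 *	    !efx->type->udp_tnl_has_port(efx, outer_udp_dst_port))
 *		features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
 *
 * (outer_udp_dst_port is just an illustrative name for the outer header's
 * UDP destination port.)
 */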
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3912) static int efx_ef10_udp_tnl_unset_port(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3913) unsigned int table, unsigned int entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3914) struct udp_tunnel_info *ti)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3915) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3916) struct efx_nic *efx = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3917) struct efx_ef10_nic_data *nic_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3918) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3920) nic_data = efx->nic_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3922) mutex_lock(&nic_data->udp_tunnels_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3923) 	/* Make sure all TX queues are stopped while we remove from the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3924) 	 * table, else we might race against an efx_features_check().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3925) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3926) efx_device_detach_sync(efx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3927) nic_data->udp_tunnels[entry].type = TUNNEL_ENCAP_UDP_PORT_ENTRY_INVALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3928) nic_data->udp_tunnels[entry].port = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3929) rc = efx_ef10_set_udp_tnl_ports(efx, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3930) mutex_unlock(&nic_data->udp_tunnels_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3932) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3933) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3935) static const struct udp_tunnel_nic_info efx_ef10_udp_tunnels = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3936) .set_port = efx_ef10_udp_tnl_set_port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3937) .unset_port = efx_ef10_udp_tnl_unset_port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3938) .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3939) .tables = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3940) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3941) .n_entries = 16,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3942) .tunnel_types = UDP_TUNNEL_TYPE_VXLAN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3943) UDP_TUNNEL_TYPE_GENEVE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3944) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3945) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3946) };
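
/* A minimal sketch of how this table reaches the udp_tunnel_nic core (the
 * real assignment is made during device probe, earlier in this driver):
 *
 *	efx->net_dev->udp_tunnel_nic_info = &efx_ef10_udp_tunnels;
 *
 * From then on the core chooses the table slot and invokes .set_port and
 * .unset_port with that entry index; UDP_TUNNEL_NIC_INFO_MAY_SLEEP allows
 * those callbacks to sleep, hence the mutex and MCDI calls above.
 */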
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3948) /* EF10 may have multiple datapath firmware variants within a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3949) * single version. Report which variants are running.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3950) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3951) static size_t efx_ef10_print_additional_fwver(struct efx_nic *efx, char *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3952) size_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3953) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3954) struct efx_ef10_nic_data *nic_data = efx->nic_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3955)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3956) return scnprintf(buf, len, " rx%x tx%x",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3957) nic_data->rx_dpcpu_fw_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3958) nic_data->tx_dpcpu_fw_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3959) }
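
/* The " rx%x tx%x" suffix is appended to the firmware version string (e.g.
 * as shown by "ethtool -i"), so a hypothetical firmware might report
 * something like "6.2.7.1001 rx1 tx1" (illustrative values only).
 */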
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3961) static unsigned int ef10_check_caps(const struct efx_nic *efx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3962) u8 flag,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3963) u32 offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3964) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3965) const struct efx_ef10_nic_data *nic_data = efx->nic_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3967) switch (offset) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3968) 	case MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS1_OFST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3969) return nic_data->datapath_caps & BIT_ULL(flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3970) 	case MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS2_OFST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3971) return nic_data->datapath_caps2 & BIT_ULL(flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3972) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3973) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3974) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3975) }
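
/* Callers normally reach this hook via the efx_has_cap() helper macro (see
 * nic_common.h), which expands a capability name into the (flag, offset)
 * pair this function expects, roughly:
 *
 *	if (efx_has_cap(efx, TX_TSO_V2))
 *		... use the TSOv2 datapath ...
 *
 * TX_TSO_V2 is just an illustrative capability name here.
 */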
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3977) #define EF10_OFFLOAD_FEATURES \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3978) (NETIF_F_IP_CSUM | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3979) NETIF_F_HW_VLAN_CTAG_FILTER | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3980) NETIF_F_IPV6_CSUM | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3981) NETIF_F_RXHASH | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3982) NETIF_F_NTUPLE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3983)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3984) const struct efx_nic_type efx_hunt_a0_vf_nic_type = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3985) .is_vf = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3986) .mem_bar = efx_ef10_vf_mem_bar,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3987) .mem_map_size = efx_ef10_mem_map_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3988) .probe = efx_ef10_probe_vf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3989) .remove = efx_ef10_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3990) .dimension_resources = efx_ef10_dimension_resources,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3991) .init = efx_ef10_init_nic,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3992) .fini = efx_ef10_fini_nic,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3993) .map_reset_reason = efx_ef10_map_reset_reason,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3994) .map_reset_flags = efx_ef10_map_reset_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3995) .reset = efx_ef10_reset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3996) .probe_port = efx_mcdi_port_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3997) .remove_port = efx_mcdi_port_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3998) .fini_dmaq = efx_fini_dmaq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3999) .prepare_flr = efx_ef10_prepare_flr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4000) .finish_flr = efx_port_dummy_op_void,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4001) .describe_stats = efx_ef10_describe_stats,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4002) .update_stats = efx_ef10_update_stats_vf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4003) .update_stats_atomic = efx_ef10_update_stats_atomic_vf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4004) .start_stats = efx_port_dummy_op_void,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4005) .pull_stats = efx_port_dummy_op_void,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4006) .stop_stats = efx_port_dummy_op_void,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4007) .push_irq_moderation = efx_ef10_push_irq_moderation,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4008) .reconfigure_mac = efx_ef10_mac_reconfigure,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4009) .check_mac_fault = efx_mcdi_mac_check_fault,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4010) .reconfigure_port = efx_mcdi_port_reconfigure,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4011) .get_wol = efx_ef10_get_wol_vf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4012) .set_wol = efx_ef10_set_wol_vf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4013) .resume_wol = efx_port_dummy_op_void,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4014) .mcdi_request = efx_ef10_mcdi_request,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4015) .mcdi_poll_response = efx_ef10_mcdi_poll_response,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4016) .mcdi_read_response = efx_ef10_mcdi_read_response,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4017) .mcdi_poll_reboot = efx_ef10_mcdi_poll_reboot,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4018) .mcdi_reboot_detected = efx_ef10_mcdi_reboot_detected,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4019) .irq_enable_master = efx_port_dummy_op_void,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4020) .irq_test_generate = efx_ef10_irq_test_generate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4021) .irq_disable_non_ev = efx_port_dummy_op_void,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4022) .irq_handle_msi = efx_ef10_msi_interrupt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4023) .irq_handle_legacy = efx_ef10_legacy_interrupt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4024) .tx_probe = efx_ef10_tx_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4025) .tx_init = efx_ef10_tx_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4026) .tx_remove = efx_mcdi_tx_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4027) .tx_write = efx_ef10_tx_write,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4028) .tx_limit_len = efx_ef10_tx_limit_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4029) .tx_enqueue = __efx_enqueue_skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4030) .rx_push_rss_config = efx_mcdi_vf_rx_push_rss_config,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4031) .rx_pull_rss_config = efx_mcdi_rx_pull_rss_config,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4032) .rx_probe = efx_mcdi_rx_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4033) .rx_init = efx_mcdi_rx_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4034) .rx_remove = efx_mcdi_rx_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4035) .rx_write = efx_ef10_rx_write,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4036) .rx_defer_refill = efx_ef10_rx_defer_refill,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4037) .rx_packet = __efx_rx_packet,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4038) .ev_probe = efx_mcdi_ev_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4039) .ev_init = efx_ef10_ev_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4040) .ev_fini = efx_mcdi_ev_fini,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4041) .ev_remove = efx_mcdi_ev_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4042) .ev_process = efx_ef10_ev_process,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4043) .ev_read_ack = efx_ef10_ev_read_ack,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4044) .ev_test_generate = efx_ef10_ev_test_generate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4045) .filter_table_probe = efx_ef10_filter_table_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4046) .filter_table_restore = efx_mcdi_filter_table_restore,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4047) .filter_table_remove = efx_mcdi_filter_table_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4048) .filter_update_rx_scatter = efx_mcdi_update_rx_scatter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4049) .filter_insert = efx_mcdi_filter_insert,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4050) .filter_remove_safe = efx_mcdi_filter_remove_safe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4051) .filter_get_safe = efx_mcdi_filter_get_safe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4052) .filter_clear_rx = efx_mcdi_filter_clear_rx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4053) .filter_count_rx_used = efx_mcdi_filter_count_rx_used,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4054) .filter_get_rx_id_limit = efx_mcdi_filter_get_rx_id_limit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4055) .filter_get_rx_ids = efx_mcdi_filter_get_rx_ids,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4056) #ifdef CONFIG_RFS_ACCEL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4057) .filter_rfs_expire_one = efx_mcdi_filter_rfs_expire_one,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4058) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4059) #ifdef CONFIG_SFC_MTD
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4060) .mtd_probe = efx_port_dummy_op_int,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4061) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4062) .ptp_write_host_time = efx_ef10_ptp_write_host_time_vf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4063) .ptp_set_ts_config = efx_ef10_ptp_set_ts_config_vf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4064) .vlan_rx_add_vid = efx_ef10_vlan_rx_add_vid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4065) .vlan_rx_kill_vid = efx_ef10_vlan_rx_kill_vid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4066) #ifdef CONFIG_SFC_SRIOV
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4067) .vswitching_probe = efx_ef10_vswitching_probe_vf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4068) .vswitching_restore = efx_ef10_vswitching_restore_vf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4069) .vswitching_remove = efx_ef10_vswitching_remove_vf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4070) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4071) .get_mac_address = efx_ef10_get_mac_address_vf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4072) .set_mac_address = efx_ef10_set_mac_address,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4073)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4074) .get_phys_port_id = efx_ef10_get_phys_port_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4075) .revision = EFX_REV_HUNT_A0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4076) .max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4077) .rx_prefix_size = ES_DZ_RX_PREFIX_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4078) .rx_hash_offset = ES_DZ_RX_PREFIX_HASH_OFST,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4079) .rx_ts_offset = ES_DZ_RX_PREFIX_TSTAMP_OFST,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4080) .can_rx_scatter = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4081) .always_rx_scatter = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4082) .min_interrupt_mode = EFX_INT_MODE_MSIX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4083) .timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4084) .offload_features = EF10_OFFLOAD_FEATURES,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4085) .mcdi_max_ver = 2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4086) .max_rx_ip_filters = EFX_MCDI_FILTER_TBL_ROWS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4087) 	.hwtstamp_filters = (1 << HWTSTAMP_FILTER_NONE) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4088) 			    (1 << HWTSTAMP_FILTER_ALL),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4089) .rx_hash_key_size = 40,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4090) .check_caps = ef10_check_caps,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4091) .print_additional_fwver = efx_ef10_print_additional_fwver,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4092) .sensor_event = efx_mcdi_sensor_event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4093) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4094)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4095) const struct efx_nic_type efx_hunt_a0_nic_type = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4096) .is_vf = false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4097) .mem_bar = efx_ef10_pf_mem_bar,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4098) .mem_map_size = efx_ef10_mem_map_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4099) .probe = efx_ef10_probe_pf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4100) .remove = efx_ef10_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4101) .dimension_resources = efx_ef10_dimension_resources,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4102) .init = efx_ef10_init_nic,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4103) .fini = efx_ef10_fini_nic,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4104) .map_reset_reason = efx_ef10_map_reset_reason,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4105) .map_reset_flags = efx_ef10_map_reset_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4106) .reset = efx_ef10_reset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4107) .probe_port = efx_mcdi_port_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4108) .remove_port = efx_mcdi_port_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4109) .fini_dmaq = efx_fini_dmaq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4110) .prepare_flr = efx_ef10_prepare_flr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4111) .finish_flr = efx_port_dummy_op_void,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4112) .describe_stats = efx_ef10_describe_stats,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4113) .update_stats = efx_ef10_update_stats_pf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4114) .start_stats = efx_mcdi_mac_start_stats,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4115) .pull_stats = efx_mcdi_mac_pull_stats,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4116) .stop_stats = efx_mcdi_mac_stop_stats,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4117) .push_irq_moderation = efx_ef10_push_irq_moderation,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4118) .reconfigure_mac = efx_ef10_mac_reconfigure,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4119) .check_mac_fault = efx_mcdi_mac_check_fault,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4120) .reconfigure_port = efx_mcdi_port_reconfigure,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4121) .get_wol = efx_ef10_get_wol,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4122) .set_wol = efx_ef10_set_wol,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4123) .resume_wol = efx_port_dummy_op_void,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4124) .test_chip = efx_ef10_test_chip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4125) .test_nvram = efx_mcdi_nvram_test_all,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4126) .mcdi_request = efx_ef10_mcdi_request,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4127) .mcdi_poll_response = efx_ef10_mcdi_poll_response,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4128) .mcdi_read_response = efx_ef10_mcdi_read_response,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4129) .mcdi_poll_reboot = efx_ef10_mcdi_poll_reboot,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4130) .mcdi_reboot_detected = efx_ef10_mcdi_reboot_detected,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4131) .irq_enable_master = efx_port_dummy_op_void,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4132) .irq_test_generate = efx_ef10_irq_test_generate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4133) .irq_disable_non_ev = efx_port_dummy_op_void,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4134) .irq_handle_msi = efx_ef10_msi_interrupt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4135) .irq_handle_legacy = efx_ef10_legacy_interrupt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4136) .tx_probe = efx_ef10_tx_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4137) .tx_init = efx_ef10_tx_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4138) .tx_remove = efx_mcdi_tx_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4139) .tx_write = efx_ef10_tx_write,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4140) .tx_limit_len = efx_ef10_tx_limit_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4141) .tx_enqueue = __efx_enqueue_skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4142) .rx_push_rss_config = efx_mcdi_pf_rx_push_rss_config,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4143) .rx_pull_rss_config = efx_mcdi_rx_pull_rss_config,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4144) .rx_push_rss_context_config = efx_mcdi_rx_push_rss_context_config,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4145) .rx_pull_rss_context_config = efx_mcdi_rx_pull_rss_context_config,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4146) .rx_restore_rss_contexts = efx_mcdi_rx_restore_rss_contexts,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4147) .rx_probe = efx_mcdi_rx_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4148) .rx_init = efx_mcdi_rx_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4149) .rx_remove = efx_mcdi_rx_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4150) .rx_write = efx_ef10_rx_write,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4151) .rx_defer_refill = efx_ef10_rx_defer_refill,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4152) .rx_packet = __efx_rx_packet,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4153) .ev_probe = efx_mcdi_ev_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4154) .ev_init = efx_ef10_ev_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4155) .ev_fini = efx_mcdi_ev_fini,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4156) .ev_remove = efx_mcdi_ev_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4157) .ev_process = efx_ef10_ev_process,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4158) .ev_read_ack = efx_ef10_ev_read_ack,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4159) .ev_test_generate = efx_ef10_ev_test_generate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4160) .filter_table_probe = efx_ef10_filter_table_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4161) .filter_table_restore = efx_mcdi_filter_table_restore,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4162) .filter_table_remove = efx_mcdi_filter_table_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4163) .filter_update_rx_scatter = efx_mcdi_update_rx_scatter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4164) .filter_insert = efx_mcdi_filter_insert,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4165) .filter_remove_safe = efx_mcdi_filter_remove_safe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4166) .filter_get_safe = efx_mcdi_filter_get_safe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4167) .filter_clear_rx = efx_mcdi_filter_clear_rx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4168) .filter_count_rx_used = efx_mcdi_filter_count_rx_used,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4169) .filter_get_rx_id_limit = efx_mcdi_filter_get_rx_id_limit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4170) .filter_get_rx_ids = efx_mcdi_filter_get_rx_ids,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4171) #ifdef CONFIG_RFS_ACCEL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4172) .filter_rfs_expire_one = efx_mcdi_filter_rfs_expire_one,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4173) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4174) #ifdef CONFIG_SFC_MTD
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4175) .mtd_probe = efx_ef10_mtd_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4176) .mtd_rename = efx_mcdi_mtd_rename,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4177) .mtd_read = efx_mcdi_mtd_read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4178) .mtd_erase = efx_mcdi_mtd_erase,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4179) .mtd_write = efx_mcdi_mtd_write,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4180) .mtd_sync = efx_mcdi_mtd_sync,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4181) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4182) .ptp_write_host_time = efx_ef10_ptp_write_host_time,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4183) .ptp_set_ts_sync_events = efx_ef10_ptp_set_ts_sync_events,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4184) .ptp_set_ts_config = efx_ef10_ptp_set_ts_config,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4185) .vlan_rx_add_vid = efx_ef10_vlan_rx_add_vid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4186) .vlan_rx_kill_vid = efx_ef10_vlan_rx_kill_vid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4187) .udp_tnl_push_ports = efx_ef10_udp_tnl_push_ports,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4188) .udp_tnl_has_port = efx_ef10_udp_tnl_has_port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4189) #ifdef CONFIG_SFC_SRIOV
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4190) .sriov_configure = efx_ef10_sriov_configure,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4191) .sriov_init = efx_ef10_sriov_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4192) .sriov_fini = efx_ef10_sriov_fini,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4193) .sriov_wanted = efx_ef10_sriov_wanted,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4194) .sriov_reset = efx_ef10_sriov_reset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4195) .sriov_flr = efx_ef10_sriov_flr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4196) .sriov_set_vf_mac = efx_ef10_sriov_set_vf_mac,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4197) .sriov_set_vf_vlan = efx_ef10_sriov_set_vf_vlan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4198) .sriov_set_vf_spoofchk = efx_ef10_sriov_set_vf_spoofchk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4199) .sriov_get_vf_config = efx_ef10_sriov_get_vf_config,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4200) .sriov_set_vf_link_state = efx_ef10_sriov_set_vf_link_state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4201) .vswitching_probe = efx_ef10_vswitching_probe_pf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4202) .vswitching_restore = efx_ef10_vswitching_restore_pf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4203) .vswitching_remove = efx_ef10_vswitching_remove_pf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4204) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4205) .get_mac_address = efx_ef10_get_mac_address_pf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4206) .set_mac_address = efx_ef10_set_mac_address,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4207) .tso_versions = efx_ef10_tso_versions,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4209) .get_phys_port_id = efx_ef10_get_phys_port_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4210) .revision = EFX_REV_HUNT_A0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4211) .max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4212) .rx_prefix_size = ES_DZ_RX_PREFIX_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4213) .rx_hash_offset = ES_DZ_RX_PREFIX_HASH_OFST,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4214) .rx_ts_offset = ES_DZ_RX_PREFIX_TSTAMP_OFST,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4215) .can_rx_scatter = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4216) .always_rx_scatter = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4217) .option_descriptors = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4218) .min_interrupt_mode = EFX_INT_MODE_LEGACY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4219) .timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4220) .offload_features = EF10_OFFLOAD_FEATURES,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4221) .mcdi_max_ver = 2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4222) .max_rx_ip_filters = EFX_MCDI_FILTER_TBL_ROWS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4223) 	.hwtstamp_filters = (1 << HWTSTAMP_FILTER_NONE) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4224) 			    (1 << HWTSTAMP_FILTER_ALL),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4225) .rx_hash_key_size = 40,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4226) .check_caps = ef10_check_caps,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4227) .print_additional_fwver = efx_ef10_print_additional_fwver,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4228) .sensor_event = efx_mcdi_sensor_event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4229) };
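
/* Nothing in this file instantiates these efx_nic_type structures itself;
 * the PCI probe path binds them by device ID, along the lines of the
 * driver's PCI ID table (efx_pci_table in efx.c):
 *
 *	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0903),	(SFC9120 PF)
 *	 .driver_data = (unsigned long)&efx_hunt_a0_nic_type},
 *	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1903),	(SFC9120 VF)
 *	 .driver_data = (unsigned long)&efx_hunt_a0_vf_nic_type},
 *
 * (Device IDs shown are illustrative of the Huntington family.)
 */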