// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2006-2013 Solarflare Communications Inc.
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/cpu_rmap.h>
#include "net_driver.h"
#include "bitfield.h"
#include "efx.h"
#include "nic.h"
#include "ef10_regs.h"
#include "farch_regs.h"
#include "io.h"
#include "workarounds.h"
#include "mcdi_pcol.h"

/**************************************************************************
 *
 * Generic buffer handling
 * These buffers are used for interrupt status, MAC stats, etc.
 *
 **************************************************************************/

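/* Allocate a DMA-coherent buffer of @len bytes, recording its length and
 * bus address in @buffer so that efx_nic_free_buffer() can release it later.
 */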
int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
			 unsigned int len, gfp_t gfp_flags)
{
	buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len,
					  &buffer->dma_addr, gfp_flags);
	if (!buffer->addr)
		return -ENOMEM;
	buffer->len = len;
	return 0;
}

void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer)
{
	if (buffer->addr) {
		dma_free_coherent(&efx->pci_dev->dev, buffer->len,
				  buffer->addr, buffer->dma_addr);
		buffer->addr = NULL;
	}
}

/* Check whether an event is present in the eventq at the current
 * read pointer. Only useful for self-test.
 */
bool efx_nic_event_present(struct efx_channel *channel)
{
	return efx_event_present(efx_event(channel, channel->eventq_read_ptr));
}

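/* Self-test helpers: reset the "which CPU handled it" marker to -1 and make
 * that store visible before asking the hardware to raise a test event or
 * interrupt; the event/IRQ handler then fills the marker in.
 */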
void efx_nic_event_test_start(struct efx_channel *channel)
{
	channel->event_test_cpu = -1;
	smp_wmb();
	channel->efx->type->ev_test_generate(channel);
}

int efx_nic_irq_test_start(struct efx_nic *efx)
{
	efx->last_irq_cpu = -1;
	smp_wmb();
	return efx->type->irq_test_generate(efx);
}

/* Hook interrupt handler(s)
 * Try MSI and then legacy interrupts.
 */
int efx_nic_init_interrupt(struct efx_nic *efx)
{
	struct efx_channel *channel;
	unsigned int n_irqs;
	int rc;

	if (!EFX_INT_MODE_USE_MSI(efx)) {
		rc = request_irq(efx->legacy_irq,
				 efx->type->irq_handle_legacy, IRQF_SHARED,
				 efx->name, efx);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "failed to hook legacy IRQ %d\n",
				  efx->pci_dev->irq);
			goto fail1;
		}
		efx->irqs_hooked = true;
		return 0;
	}

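	/* With MSI-X and accelerated RFS, build a CPU->queue reverse map so
	 * the stack can steer flows to the channel whose IRQ is affine to
	 * the flow's CPU.
	 */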
#ifdef CONFIG_RFS_ACCEL
	if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
		efx->net_dev->rx_cpu_rmap =
			alloc_irq_cpu_rmap(efx->n_rx_channels);
		if (!efx->net_dev->rx_cpu_rmap) {
			rc = -ENOMEM;
			goto fail1;
		}
	}
#endif

	/* Hook MSI or MSI-X interrupt */
	n_irqs = 0;
	efx_for_each_channel(channel, efx) {
		rc = request_irq(channel->irq, efx->type->irq_handle_msi,
				 IRQF_PROBE_SHARED, /* Not shared */
				 efx->msi_context[channel->channel].name,
				 &efx->msi_context[channel->channel]);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "failed to hook IRQ %d\n", channel->irq);
			goto fail2;
		}
		++n_irqs;

#ifdef CONFIG_RFS_ACCEL
		if (efx->interrupt_mode == EFX_INT_MODE_MSIX &&
		    channel->channel < efx->n_rx_channels) {
			rc = irq_cpu_rmap_add(efx->net_dev->rx_cpu_rmap,
					      channel->irq);
			if (rc)
				goto fail2;
		}
#endif
	}

	efx->irqs_hooked = true;
	return 0;

 fail2:
#ifdef CONFIG_RFS_ACCEL
	free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
	efx->net_dev->rx_cpu_rmap = NULL;
#endif
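	/* Free only the IRQs that were successfully hooked above */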
	efx_for_each_channel(channel, efx) {
		if (n_irqs-- == 0)
			break;
		free_irq(channel->irq, &efx->msi_context[channel->channel]);
	}
 fail1:
	return rc;
}

void efx_nic_fini_interrupt(struct efx_nic *efx)
{
	struct efx_channel *channel;

#ifdef CONFIG_RFS_ACCEL
	free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
	efx->net_dev->rx_cpu_rmap = NULL;
#endif

	if (!efx->irqs_hooked)
		return;
	if (EFX_INT_MODE_USE_MSI(efx)) {
		/* Disable MSI/MSI-X interrupts */
		efx_for_each_channel(channel, efx)
			free_irq(channel->irq,
				 &efx->msi_context[channel->channel]);
	} else {
		/* Disable legacy interrupt */
		free_irq(efx->legacy_irq, efx);
	}
	efx->irqs_hooked = false;
}

/* Register dump */

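/* Each entry below records the range of hardware revisions (Falcon A..Z,
 * EF10 D..Z) for which a register or table exists; only entries matching
 * efx->type->revision are dumped.  The REGISTER* macros paste together the
 * generated register names (e.g. FR_AZ_<name>) from farch_regs.h and
 * ef10_regs.h.
 */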
#define REGISTER_REVISION_FA	1
#define REGISTER_REVISION_FB	2
#define REGISTER_REVISION_FC	3
#define REGISTER_REVISION_FZ	3	/* last Falcon arch revision */
#define REGISTER_REVISION_ED	4
#define REGISTER_REVISION_EZ	4	/* latest EF10 revision */

struct efx_nic_reg {
	u32 offset:24;
	u32 min_revision:3, max_revision:3;
};

#define REGISTER(name, arch, min_rev, max_rev) {			\
	arch ## R_ ## min_rev ## max_rev ## _ ## name,			\
	REGISTER_REVISION_ ## arch ## min_rev,				\
	REGISTER_REVISION_ ## arch ## max_rev				\
}
#define REGISTER_AA(name) REGISTER(name, F, A, A)
#define REGISTER_AB(name) REGISTER(name, F, A, B)
#define REGISTER_AZ(name) REGISTER(name, F, A, Z)
#define REGISTER_BB(name) REGISTER(name, F, B, B)
#define REGISTER_BZ(name) REGISTER(name, F, B, Z)
#define REGISTER_CZ(name) REGISTER(name, F, C, Z)
#define REGISTER_DZ(name) REGISTER(name, E, D, Z)

static const struct efx_nic_reg efx_nic_regs[] = {
	REGISTER_AZ(ADR_REGION),
	REGISTER_AZ(INT_EN_KER),
	REGISTER_BZ(INT_EN_CHAR),
	REGISTER_AZ(INT_ADR_KER),
	REGISTER_BZ(INT_ADR_CHAR),
	/* INT_ACK_KER is WO */
	/* INT_ISR0 is RC */
	REGISTER_AZ(HW_INIT),
	REGISTER_CZ(USR_EV_CFG),
	REGISTER_AB(EE_SPI_HCMD),
	REGISTER_AB(EE_SPI_HADR),
	REGISTER_AB(EE_SPI_HDATA),
	REGISTER_AB(EE_BASE_PAGE),
	REGISTER_AB(EE_VPD_CFG0),
	/* EE_VPD_SW_CNTL and EE_VPD_SW_DATA are not used */
	/* PMBX_DBG_IADDR and PBMX_DBG_IDATA are indirect */
	/* PCIE_CORE_INDIRECT is indirect */
	REGISTER_AB(NIC_STAT),
	REGISTER_AB(GPIO_CTL),
	REGISTER_AB(GLB_CTL),
	/* FATAL_INTR_KER and FATAL_INTR_CHAR are partly RC */
	REGISTER_BZ(DP_CTRL),
	REGISTER_AZ(MEM_STAT),
	REGISTER_AZ(CS_DEBUG),
	REGISTER_AZ(ALTERA_BUILD),
	REGISTER_AZ(CSR_SPARE),
	REGISTER_AB(PCIE_SD_CTL0123),
	REGISTER_AB(PCIE_SD_CTL45),
	REGISTER_AB(PCIE_PCS_CTL_STAT),
	/* DEBUG_DATA_OUT is not used */
	/* DRV_EV is WO */
	REGISTER_AZ(EVQ_CTL),
	REGISTER_AZ(EVQ_CNT1),
	REGISTER_AZ(EVQ_CNT2),
	REGISTER_AZ(BUF_TBL_CFG),
	REGISTER_AZ(SRM_RX_DC_CFG),
	REGISTER_AZ(SRM_TX_DC_CFG),
	REGISTER_AZ(SRM_CFG),
	/* BUF_TBL_UPD is WO */
	REGISTER_AZ(SRM_UPD_EVQ),
	REGISTER_AZ(SRAM_PARITY),
	REGISTER_AZ(RX_CFG),
	REGISTER_BZ(RX_FILTER_CTL),
	/* RX_FLUSH_DESCQ is WO */
	REGISTER_AZ(RX_DC_CFG),
	REGISTER_AZ(RX_DC_PF_WM),
	REGISTER_BZ(RX_RSS_TKEY),
	/* RX_NODESC_DROP is RC */
	REGISTER_AA(RX_SELF_RST),
	/* RX_DEBUG, RX_PUSH_DROP are not used */
	REGISTER_CZ(RX_RSS_IPV6_REG1),
	REGISTER_CZ(RX_RSS_IPV6_REG2),
	REGISTER_CZ(RX_RSS_IPV6_REG3),
	/* TX_FLUSH_DESCQ is WO */
	REGISTER_AZ(TX_DC_CFG),
	REGISTER_AA(TX_CHKSM_CFG),
	REGISTER_AZ(TX_CFG),
	/* TX_PUSH_DROP is not used */
	REGISTER_AZ(TX_RESERVED),
	REGISTER_BZ(TX_PACE),
	/* TX_PACE_DROP_QID is RC */
	REGISTER_BB(TX_VLAN),
	REGISTER_BZ(TX_IPFIL_PORTEN),
	REGISTER_AB(MD_TXD),
	REGISTER_AB(MD_RXD),
	REGISTER_AB(MD_CS),
	REGISTER_AB(MD_PHY_ADR),
	REGISTER_AB(MD_ID),
	/* MD_STAT is RC */
	REGISTER_AB(MAC_STAT_DMA),
	REGISTER_AB(MAC_CTRL),
	REGISTER_BB(GEN_MODE),
	REGISTER_AB(MAC_MC_HASH_REG0),
	REGISTER_AB(MAC_MC_HASH_REG1),
	REGISTER_AB(GM_CFG1),
	REGISTER_AB(GM_CFG2),
	/* GM_IPG and GM_HD are not used */
	REGISTER_AB(GM_MAX_FLEN),
	/* GM_TEST is not used */
	REGISTER_AB(GM_ADR1),
	REGISTER_AB(GM_ADR2),
	REGISTER_AB(GMF_CFG0),
	REGISTER_AB(GMF_CFG1),
	REGISTER_AB(GMF_CFG2),
	REGISTER_AB(GMF_CFG3),
	REGISTER_AB(GMF_CFG4),
	REGISTER_AB(GMF_CFG5),
	REGISTER_BB(TX_SRC_MAC_CTL),
	REGISTER_AB(XM_ADR_LO),
	REGISTER_AB(XM_ADR_HI),
	REGISTER_AB(XM_GLB_CFG),
	REGISTER_AB(XM_TX_CFG),
	REGISTER_AB(XM_RX_CFG),
	REGISTER_AB(XM_MGT_INT_MASK),
	REGISTER_AB(XM_FC),
	REGISTER_AB(XM_PAUSE_TIME),
	REGISTER_AB(XM_TX_PARAM),
	REGISTER_AB(XM_RX_PARAM),
	/* XM_MGT_INT_MSK (note no 'A') is RC */
	REGISTER_AB(XX_PWR_RST),
	REGISTER_AB(XX_SD_CTL),
	REGISTER_AB(XX_TXDRV_CTL),
	/* XX_PRBS_CTL, XX_PRBS_CHK and XX_PRBS_ERR are not used */
	/* XX_CORE_STAT is partly RC */
	REGISTER_DZ(BIU_HW_REV_ID),
	REGISTER_DZ(MC_DB_LWRD),
	REGISTER_DZ(MC_DB_HWRD),
};

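/* A register table is a block of "rows" entries spaced "step" bytes apart,
 * starting at "offset"; only the first 16 bytes of each entry are dumped
 * (see efx_nic_get_regs()).
 */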
struct efx_nic_reg_table {
	u32 offset:24;
	u32 min_revision:3, max_revision:3;
	u32 step:6, rows:21;
};

#define REGISTER_TABLE_DIMENSIONS(_, offset, arch, min_rev, max_rev, step, rows) { \
	offset,								\
	REGISTER_REVISION_ ## arch ## min_rev,				\
	REGISTER_REVISION_ ## arch ## max_rev,				\
	step, rows							\
}
#define REGISTER_TABLE(name, arch, min_rev, max_rev)			\
	REGISTER_TABLE_DIMENSIONS(					\
		name, arch ## R_ ## min_rev ## max_rev ## _ ## name,	\
		arch, min_rev, max_rev,					\
		arch ## R_ ## min_rev ## max_rev ## _ ## name ## _STEP,	\
		arch ## R_ ## min_rev ## max_rev ## _ ## name ## _ROWS)
#define REGISTER_TABLE_AA(name) REGISTER_TABLE(name, F, A, A)
#define REGISTER_TABLE_AZ(name) REGISTER_TABLE(name, F, A, Z)
#define REGISTER_TABLE_BB(name) REGISTER_TABLE(name, F, B, B)
#define REGISTER_TABLE_BZ(name) REGISTER_TABLE(name, F, B, Z)
#define REGISTER_TABLE_BB_CZ(name)					\
	REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, F, B, B,	\
				  FR_BZ_ ## name ## _STEP,		\
				  FR_BB_ ## name ## _ROWS),		\
	REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, F, C, Z,	\
				  FR_BZ_ ## name ## _STEP,		\
				  FR_CZ_ ## name ## _ROWS)
#define REGISTER_TABLE_CZ(name) REGISTER_TABLE(name, F, C, Z)
#define REGISTER_TABLE_DZ(name) REGISTER_TABLE(name, E, D, Z)

static const struct efx_nic_reg_table efx_nic_reg_tables[] = {
	/* DRIVER is not used */
	/* EVQ_RPTR, TIMER_COMMAND, USR_EV and {RX,TX}_DESC_UPD are WO */
	REGISTER_TABLE_BB(TX_IPFIL_TBL),
	REGISTER_TABLE_BB(TX_SRC_MAC_TBL),
	REGISTER_TABLE_AA(RX_DESC_PTR_TBL_KER),
	REGISTER_TABLE_BB_CZ(RX_DESC_PTR_TBL),
	REGISTER_TABLE_AA(TX_DESC_PTR_TBL_KER),
	REGISTER_TABLE_BB_CZ(TX_DESC_PTR_TBL),
	REGISTER_TABLE_AA(EVQ_PTR_TBL_KER),
	REGISTER_TABLE_BB_CZ(EVQ_PTR_TBL),
	/* We can't reasonably read all of the buffer table (up to 8MB!).
	 * However this driver will only use a few entries. Reading
	 * 1K entries allows for some expansion of queue count and
	 * size before we need to change the version.
	 */
	REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL_KER, FR_AA_BUF_FULL_TBL_KER,
				  F, A, A, 8, 1024),
	REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL, FR_BZ_BUF_FULL_TBL,
				  F, B, Z, 8, 1024),
	REGISTER_TABLE_CZ(RX_MAC_FILTER_TBL0),
	REGISTER_TABLE_BB_CZ(TIMER_TBL),
	REGISTER_TABLE_BB_CZ(TX_PACE_TBL),
	REGISTER_TABLE_BZ(RX_INDIRECTION_TBL),
	/* TX_FILTER_TBL0 is huge and not used by this driver */
	REGISTER_TABLE_CZ(TX_MAC_FILTER_TBL0),
	REGISTER_TABLE_CZ(MC_TREG_SMEM),
	/* MSIX_PBA_TABLE is not mapped */
	/* SRM_DBG is not mapped (and is redundant with BUF_FULL_TBL) */
	REGISTER_TABLE_BZ(RX_FILTER_TBL0),
	REGISTER_TABLE_DZ(BIU_MC_SFT_STATUS),
};

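/* Work out how many bytes efx_nic_get_regs() will write for this NIC
 * revision: one efx_oword_t per matching register, plus rows * min(step, 16)
 * bytes per matching table (entries wider than 16 bytes are truncated).
 */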
size_t efx_nic_get_regs_len(struct efx_nic *efx)
{
	const struct efx_nic_reg *reg;
	const struct efx_nic_reg_table *table;
	size_t len = 0;

	for (reg = efx_nic_regs;
	     reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs);
	     reg++)
		if (efx->type->revision >= reg->min_revision &&
		    efx->type->revision <= reg->max_revision)
			len += sizeof(efx_oword_t);

	for (table = efx_nic_reg_tables;
	     table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables);
	     table++)
		if (efx->type->revision >= table->min_revision &&
		    efx->type->revision <= table->max_revision)
			len += table->rows * min_t(size_t, table->step, 16);

	return len;
}

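/* Dump registers and register tables into @buf, which must be at least
 * efx_nic_get_regs_len() bytes, in the same order used to compute that
 * length (used by the ethtool register-dump interface).
 */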
void efx_nic_get_regs(struct efx_nic *efx, void *buf)
{
	const struct efx_nic_reg *reg;
	const struct efx_nic_reg_table *table;

	for (reg = efx_nic_regs;
	     reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs);
	     reg++) {
		if (efx->type->revision >= reg->min_revision &&
		    efx->type->revision <= reg->max_revision) {
			efx_reado(efx, (efx_oword_t *)buf, reg->offset);
			buf += sizeof(efx_oword_t);
		}
	}

	for (table = efx_nic_reg_tables;
	     table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables);
	     table++) {
		size_t size, i;

		if (!(efx->type->revision >= table->min_revision &&
		      efx->type->revision <= table->max_revision))
			continue;

		size = min_t(size_t, table->step, 16);

		for (i = 0; i < table->rows; i++) {
			switch (table->step) {
			case 4: /* 32-bit SRAM */
				efx_readd(efx, buf, table->offset + 4 * i);
				break;
			case 8: /* 64-bit SRAM */
				efx_sram_readq(efx,
					       efx->membase + table->offset,
					       buf, i);
				break;
			case 16: /* 128-bit-readable register */
				efx_reado_table(efx, buf, table->offset, i);
				break;
			case 32: /* 128-bit register, interleaved */
				efx_reado_table(efx, buf, table->offset, 2 * i);
				break;
			default:
				WARN_ON(1);
				return;
			}
			buf += size;
		}
	}
}

/**
 * efx_nic_describe_stats - Describe supported statistics for ethtool
 * @desc: Array of &struct efx_hw_stat_desc describing the statistics
 * @count: Length of the @desc array
 * @mask: Bitmask of which elements of @desc are enabled
 * @names: Buffer to copy names to, or %NULL. The names are copied
 *	starting at intervals of %ETH_GSTRING_LEN bytes.
 *
 * Returns the number of visible statistics, i.e. the number of set
 * bits in the first @count bits of @mask for which a name is defined.
 */
size_t efx_nic_describe_stats(const struct efx_hw_stat_desc *desc, size_t count,
			      const unsigned long *mask, u8 *names)
{
	size_t visible = 0;
	size_t index;

	for_each_set_bit(index, mask, count) {
		if (desc[index].name) {
			if (names) {
				strlcpy(names, desc[index].name,
					ETH_GSTRING_LEN);
				names += ETH_GSTRING_LEN;
			}
			++visible;
		}
	}

	return visible;
}

/**
 * efx_nic_copy_stats - Copy stats from the DMA buffer into an
 *	intermediate buffer. This is used to get a consistent
 *	set of stats while the DMA buffer can be written at any time
 *	by the NIC.
 * @efx: The associated NIC.
 * @dest: Destination buffer. Must be the same size as the DMA buffer.
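 *
 * Return: 0 on success (@dest is zero-filled if the statistics buffer is
 *	not yet available), or -EIO if a consistent snapshot could not be
 *	read within the retry budget.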
 */
int efx_nic_copy_stats(struct efx_nic *efx, __le64 *dest)
{
	__le64 *dma_stats = efx->stats_buffer.addr;
	__le64 generation_start, generation_end;
	int rc = 0, retry;

	if (!dest)
		return 0;

	if (!dma_stats)
		goto return_zeroes;

	/* If we're unlucky enough to read statistics during the DMA, wait
	 * up to 10ms for it to finish (typically takes <500us)
	 */
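	/* The MC writes the generation-start word before the stats and the
	 * generation-end word after them, so reading end first and start
	 * after the copy lets us detect a concurrent update.
	 */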
	for (retry = 0; retry < 100; ++retry) {
		generation_end = dma_stats[efx->num_mac_stats - 1];
		if (generation_end == EFX_MC_STATS_GENERATION_INVALID)
			goto return_zeroes;
		rmb();
		memcpy(dest, dma_stats, efx->num_mac_stats * sizeof(__le64));
		rmb();
		generation_start = dma_stats[MC_CMD_MAC_GENERATION_START];
		if (generation_end == generation_start)
			return 0; /* return good data */
		udelay(100);
	}

	rc = -EIO;

return_zeroes:
	memset(dest, 0, efx->num_mac_stats * sizeof(u64));
	return rc;
}

/**
 * efx_nic_update_stats - Convert statistics DMA buffer to array of u64
 * @desc: Array of &struct efx_hw_stat_desc describing the DMA buffer
 *	layout. DMA widths of 0, 16, 32 and 64 are supported; where
 *	the width is specified as 0 the corresponding element of
 *	@stats is not updated.
 * @count: Length of the @desc array
 * @mask: Bitmask of which elements of @desc are enabled
 * @stats: Buffer to update with the converted statistics. The length
 *	of this array must be at least @count.
 * @dma_buf: DMA buffer containing hardware statistics
 * @accumulate: If set, the converted values will be added rather than
 *	directly stored to the corresponding elements of @stats
 */
void efx_nic_update_stats(const struct efx_hw_stat_desc *desc, size_t count,
			  const unsigned long *mask,
			  u64 *stats, const void *dma_buf, bool accumulate)
{
	size_t index;

	for_each_set_bit(index, mask, count) {
		if (desc[index].dma_width) {
			const void *addr = dma_buf + desc[index].offset;
			u64 val;

			switch (desc[index].dma_width) {
			case 16:
				val = le16_to_cpup((__le16 *)addr);
				break;
			case 32:
				val = le32_to_cpup((__le32 *)addr);
				break;
			case 64:
				val = le64_to_cpup((__le64 *)addr);
				break;
			default:
				WARN_ON(1);
				val = 0;
				break;
			}

			if (accumulate)
				stats[index] += val;
			else
				stats[index] = val;
		}
	}
}

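/* The RX no-descriptor drop counter accumulates in hardware even while the
 * interface is down.  Track how many drops happened while down (or across
 * the first update after coming back up) and subtract them, so the value
 * reported to userspace only covers time the interface was up.
 */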
void efx_nic_fix_nodesc_drop_stat(struct efx_nic *efx, u64 *rx_nodesc_drops)
{
	/* if down, or this is the first update after coming up */
	if (!(efx->net_dev->flags & IFF_UP) || !efx->rx_nodesc_drops_prev_state)
		efx->rx_nodesc_drops_while_down +=
			*rx_nodesc_drops - efx->rx_nodesc_drops_total;
	efx->rx_nodesc_drops_total = *rx_nodesc_drops;
	efx->rx_nodesc_drops_prev_state = !!(efx->net_dev->flags & IFF_UP);
	*rx_nodesc_drops -= efx->rx_nodesc_drops_while_down;
}