Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2006-2013 Solarflare Communications Inc.
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/crc32.h>
#include "net_driver.h"
#include "bitfield.h"
#include "efx.h"
#include "rx_common.h"
#include "nic.h"
#include "farch_regs.h"
#include "sriov.h"
#include "siena_sriov.h"
#include "io.h"
#include "workarounds.h"

/* Falcon-architecture (SFC9000-family) support */

/**************************************************************************
 *
 * Configurable values
 *
 **************************************************************************
 */

/* This is set to 16 for a good reason.  In summary, if larger than
 * 16, the descriptor cache holds more than a default socket
 * buffer's worth of packets (for UDP we can only have at most one
 * socket buffer's worth outstanding).  This combined with the fact
 * that we only get 1 TX event per descriptor cache means the NIC
 * goes idle.
 */
#define TX_DC_ENTRIES 16
#define TX_DC_ENTRIES_ORDER 1

#define RX_DC_ENTRIES 64
#define RX_DC_ENTRIES_ORDER 3
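/* The *_ORDER values are log2(entries / 8): the hardware
 * descriptor-cache size fields count in units of eight descriptors
 * (8 << 1 = 16, 8 << 3 = 64), which is how these constants are used
 * for the FRF_AZ_*_DC_SIZE register fields elsewhere in this file.
 */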

/* If EFX_MAX_INT_ERRORS internal errors occur within
 * EFX_INT_ERROR_EXPIRE seconds, we consider the NIC broken and
 * disable it.
 */
#define EFX_INT_ERROR_EXPIRE 3600
#define EFX_MAX_INT_ERRORS 5

/* Depth of RX flush request fifo */
#define EFX_RX_FLUSH_COUNT 4

/* Driver generated events */
#define _EFX_CHANNEL_MAGIC_TEST		0x000101
#define _EFX_CHANNEL_MAGIC_FILL		0x000102
#define _EFX_CHANNEL_MAGIC_RX_DRAIN	0x000103
#define _EFX_CHANNEL_MAGIC_TX_DRAIN	0x000104

#define _EFX_CHANNEL_MAGIC(_code, _data)	((_code) << 8 | (_data))
#define _EFX_CHANNEL_MAGIC_CODE(_magic)		((_magic) >> 8)
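/* e.g. a TEST event on channel 3 encodes as
 * _EFX_CHANNEL_MAGIC(0x000101, 3) == 0x010103: the code is recovered
 * with _EFX_CHANNEL_MAGIC_CODE() and the channel or queue number sits
 * in the low byte.
 */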

#define EFX_CHANNEL_MAGIC_TEST(_channel)				\
	_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TEST, (_channel)->channel)
#define EFX_CHANNEL_MAGIC_FILL(_rx_queue)				\
	_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_FILL,			\
			   efx_rx_queue_index(_rx_queue))
#define EFX_CHANNEL_MAGIC_RX_DRAIN(_rx_queue)				\
	_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_RX_DRAIN,			\
			   efx_rx_queue_index(_rx_queue))
#define EFX_CHANNEL_MAGIC_TX_DRAIN(_tx_queue)				\
	_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TX_DRAIN,			\
			   (_tx_queue)->queue)

static void efx_farch_magic_event(struct efx_channel *channel, u32 magic);

/**************************************************************************
 *
 * Hardware access
 *
 **************************************************************************/

static inline void efx_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value,
				     unsigned int index)
{
	efx_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base,
			value, index);
}

static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b,
				     const efx_oword_t *mask)
{
	return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) ||
		((a->u64[1] ^ b->u64[1]) & mask->u64[1]);
}

int efx_farch_test_registers(struct efx_nic *efx,
			     const struct efx_farch_register_test *regs,
			     size_t n_regs)
{
	unsigned address = 0;
	int i, j;
	efx_oword_t mask, imask, original, reg, buf;

	for (i = 0; i < n_regs; ++i) {
		address = regs[i].address;
		mask = imask = regs[i].mask;
		EFX_INVERT_OWORD(imask);

		efx_reado(efx, &original, address);

		/* bit sweep on and off */
		for (j = 0; j < 128; j++) {
			if (!EFX_EXTRACT_OWORD32(mask, j, j))
				continue;

			/* Test this testable bit can be set in isolation */
			EFX_AND_OWORD(reg, original, mask);
			EFX_SET_OWORD32(reg, j, j, 1);

			efx_writeo(efx, &reg, address);
			efx_reado(efx, &buf, address);

			if (efx_masked_compare_oword(&reg, &buf, &mask))
				goto fail;

			/* Test this testable bit can be cleared in isolation */
			EFX_OR_OWORD(reg, original, mask);
			EFX_SET_OWORD32(reg, j, j, 0);

			efx_writeo(efx, &reg, address);
			efx_reado(efx, &buf, address);

			if (efx_masked_compare_oword(&reg, &buf, &mask))
				goto fail;
		}

		efx_writeo(efx, &original, address);
	}

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev,
		  "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT
		  " at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg),
		  EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask));
	return -EIO;
}

/**************************************************************************
 *
 * Special buffer handling
 * Special buffers are used for event queues and the TX and RX
 * descriptor rings.
 *
 *************************************************************************/

/*
 * Initialise a special buffer
 *
 * This will define a buffer (previously allocated via
 * efx_alloc_special_buffer()) in the buffer table, allowing
 * it to be used for event queues, descriptor rings etc.
 */
static void
efx_init_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
	efx_qword_t buf_desc;
	unsigned int index;
	dma_addr_t dma_addr;
	int i;

	EFX_WARN_ON_PARANOID(!buffer->buf.addr);

	/* Write buffer descriptors to NIC */
	for (i = 0; i < buffer->entries; i++) {
		index = buffer->index + i;
		dma_addr = buffer->buf.dma_addr + (i * EFX_BUF_SIZE);
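		/* Each buffer-table entry maps one EFX_BUF_SIZE (4K) page;
		 * the entry stores the 4K-aligned page number, hence
		 * dma_addr >> 12 below.
		 */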
		netif_dbg(efx, probe, efx->net_dev,
			  "mapping special buffer %d at %llx\n",
			  index, (unsigned long long)dma_addr);
		EFX_POPULATE_QWORD_3(buf_desc,
				     FRF_AZ_BUF_ADR_REGION, 0,
				     FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12,
				     FRF_AZ_BUF_OWNER_ID_FBUF, 0);
		efx_write_buf_tbl(efx, &buf_desc, index);
	}
}

/* Unmaps a buffer and clears the buffer table entries */
static void
efx_fini_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
	efx_oword_t buf_tbl_upd;
	unsigned int start = buffer->index;
	unsigned int end = (buffer->index + buffer->entries - 1);

	if (!buffer->entries)
		return;

	netif_dbg(efx, hw, efx->net_dev, "unmapping special buffers %d-%d\n",
		  buffer->index, buffer->index + buffer->entries - 1);

	EFX_POPULATE_OWORD_4(buf_tbl_upd,
			     FRF_AZ_BUF_UPD_CMD, 0,
			     FRF_AZ_BUF_CLR_CMD, 1,
			     FRF_AZ_BUF_CLR_END_ID, end,
			     FRF_AZ_BUF_CLR_START_ID, start);
	efx_writeo(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD);
}

/*
 * Allocate a new special buffer
 *
 * This allocates memory for a new buffer, clears it and allocates a
 * new buffer ID range.  It does not write into the buffer table.
 *
 * This call will allocate 4KB buffers, since 8KB buffers can't be
 * used for event queues and descriptor rings.
 */
static int efx_alloc_special_buffer(struct efx_nic *efx,
				    struct efx_special_buffer *buffer,
				    unsigned int len)
{
#ifdef CONFIG_SFC_SRIOV
	struct siena_nic_data *nic_data = efx->nic_data;
#endif
	len = ALIGN(len, EFX_BUF_SIZE);
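	/* len is now a whole number of EFX_BUF_SIZE (4K) pages;
	 * buffer->entries below is the number of buffer-table entries
	 * (one per page) that this allocation will consume.
	 */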

	if (efx_nic_alloc_buffer(efx, &buffer->buf, len, GFP_KERNEL))
		return -ENOMEM;
	buffer->entries = len / EFX_BUF_SIZE;
	BUG_ON(buffer->buf.dma_addr & (EFX_BUF_SIZE - 1));

	/* Select new buffer ID */
	buffer->index = efx->next_buffer_table;
	efx->next_buffer_table += buffer->entries;
#ifdef CONFIG_SFC_SRIOV
	BUG_ON(efx_siena_sriov_enabled(efx) &&
	       nic_data->vf_buftbl_base < efx->next_buffer_table);
#endif

	netif_dbg(efx, probe, efx->net_dev,
		  "allocating special buffers %d-%d at %llx+%x "
		  "(virt %p phys %llx)\n", buffer->index,
		  buffer->index + buffer->entries - 1,
		  (u64)buffer->buf.dma_addr, len,
		  buffer->buf.addr, (u64)virt_to_phys(buffer->buf.addr));

	return 0;
}

static void
efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
	if (!buffer->buf.addr)
		return;

	netif_dbg(efx, hw, efx->net_dev,
		  "deallocating special buffers %d-%d at %llx+%x "
		  "(virt %p phys %llx)\n", buffer->index,
		  buffer->index + buffer->entries - 1,
		  (u64)buffer->buf.dma_addr, buffer->buf.len,
		  buffer->buf.addr, (u64)virt_to_phys(buffer->buf.addr));

	efx_nic_free_buffer(efx, &buffer->buf);
	buffer->entries = 0;
}

/**************************************************************************
 *
 * TX path
 *
 **************************************************************************/

/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
static inline void efx_farch_notify_tx_desc(struct efx_tx_queue *tx_queue)
{
	unsigned write_ptr;
	efx_dword_t reg;

	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
	EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr);
	efx_writed_page(tx_queue->efx, &reg,
			FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);
}

/* Write pointer and first descriptor for TX descriptor ring */
static inline void efx_farch_push_tx_desc(struct efx_tx_queue *tx_queue,
					  const efx_qword_t *txd)
{
	unsigned write_ptr;
	efx_oword_t reg;

	BUILD_BUG_ON(FRF_AZ_TX_DESC_LBN != 0);
	BUILD_BUG_ON(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0);
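	/* The descriptor occupies the low qword of the 128-bit doorbell
	 * register (hence the LBN 0 assertion above), so the single write
	 * below both publishes the descriptor and updates the write
	 * pointer.
	 */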

	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
	EFX_POPULATE_OWORD_2(reg, FRF_AZ_TX_DESC_PUSH_CMD, true,
			     FRF_AZ_TX_DESC_WPTR, write_ptr);
	reg.qword[0] = *txd;
	efx_writeo_page(tx_queue->efx, &reg,
			FR_BZ_TX_DESC_UPD_P0, tx_queue->queue);
}


/* For each entry inserted into the software descriptor ring, create a
 * descriptor in the hardware TX descriptor ring (in host memory), and
 * write a doorbell.
 */
void efx_farch_tx_write(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;
	efx_qword_t *txd;
	unsigned write_ptr;
	unsigned old_write_count = tx_queue->write_count;

	tx_queue->xmit_pending = false;
	if (unlikely(tx_queue->write_count == tx_queue->insert_count))
		return;

	do {
		write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
		buffer = &tx_queue->buffer[write_ptr];
		txd = efx_tx_desc(tx_queue, write_ptr);
		++tx_queue->write_count;

		EFX_WARN_ON_ONCE_PARANOID(buffer->flags & EFX_TX_BUF_OPTION);

		/* Create TX descriptor ring entry */
		BUILD_BUG_ON(EFX_TX_BUF_CONT != 1);
		EFX_POPULATE_QWORD_4(*txd,
				     FSF_AZ_TX_KER_CONT,
				     buffer->flags & EFX_TX_BUF_CONT,
				     FSF_AZ_TX_KER_BYTE_COUNT, buffer->len,
				     FSF_AZ_TX_KER_BUF_REGION, 0,
				     FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr);
	} while (tx_queue->write_count != tx_queue->insert_count);

	wmb(); /* Ensure descriptors are written before they are fetched */
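	/* Push the first new descriptor through the doorbell register when
	 * efx_nic_may_push_tx_desc() allows it (queue previously empty and
	 * exactly one descriptor added), saving the NIC a descriptor fetch
	 * from host memory; otherwise just bump the write pointer.
	 */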
	if (efx_nic_may_push_tx_desc(tx_queue, old_write_count)) {
		txd = efx_tx_desc(tx_queue,
				  old_write_count & tx_queue->ptr_mask);
		efx_farch_push_tx_desc(tx_queue, txd);
		++tx_queue->pushes;
	} else {
		efx_farch_notify_tx_desc(tx_queue);
	}
}

unsigned int efx_farch_tx_limit_len(struct efx_tx_queue *tx_queue,
				    dma_addr_t dma_addr, unsigned int len)
{
	/* Don't cross 4K boundaries with descriptors. */
	unsigned int limit = (~dma_addr & (EFX_PAGE_SIZE - 1)) + 1;
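	/* limit is the byte count from dma_addr to the next 4K boundary,
	 * e.g. a dma_addr ending in 0xf00 gives
	 * (~0xf00 & 0xfff) + 1 = 0x100 (256 bytes).
	 */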

	len = min(limit, len);

	return len;
}


/* Allocate hardware resources for a TX queue */
int efx_farch_tx_probe(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned entries;

	tx_queue->type = ((tx_queue->label & 1) ? EFX_TXQ_TYPE_OUTER_CSUM : 0) |
			 ((tx_queue->label & 2) ? EFX_TXQ_TYPE_HIGHPRI : 0);
	entries = tx_queue->ptr_mask + 1;
	return efx_alloc_special_buffer(efx, &tx_queue->txd,
					entries * sizeof(efx_qword_t));
}

void efx_farch_tx_init(struct efx_tx_queue *tx_queue)
{
	int csum = tx_queue->type & EFX_TXQ_TYPE_OUTER_CSUM;
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t reg;

	/* Pin TX descriptor ring */
	efx_init_special_buffer(efx, &tx_queue->txd);

	/* Push TX descriptor ring to card */
	EFX_POPULATE_OWORD_10(reg,
			      FRF_AZ_TX_DESCQ_EN, 1,
			      FRF_AZ_TX_ISCSI_DDIG_EN, 0,
			      FRF_AZ_TX_ISCSI_HDIG_EN, 0,
			      FRF_AZ_TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index,
			      FRF_AZ_TX_DESCQ_EVQ_ID,
			      tx_queue->channel->channel,
			      FRF_AZ_TX_DESCQ_OWNER_ID, 0,
			      FRF_AZ_TX_DESCQ_LABEL, tx_queue->label,
			      FRF_AZ_TX_DESCQ_SIZE,
			      __ffs(tx_queue->txd.entries),
			      FRF_AZ_TX_DESCQ_TYPE, 0,
			      FRF_BZ_TX_NON_IP_DROP_DIS, 1);

	EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
	EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_TCP_CHKSM_DIS, !csum);

	efx_writeo_table(efx, &reg, efx->type->txd_ptr_tbl_base,
			 tx_queue->queue);

	EFX_POPULATE_OWORD_1(reg,
			     FRF_BZ_TX_PACE,
			     (tx_queue->type & EFX_TXQ_TYPE_HIGHPRI) ?
			     FFE_BZ_TX_PACE_OFF :
			     FFE_BZ_TX_PACE_RESERVED);
	efx_writeo_table(efx, &reg, FR_BZ_TX_PACE_TBL, tx_queue->queue);

	tx_queue->tso_version = 1;
}

static void efx_farch_flush_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t tx_flush_descq;

	WARN_ON(atomic_read(&tx_queue->flush_outstanding));
	atomic_set(&tx_queue->flush_outstanding, 1);

	EFX_POPULATE_OWORD_2(tx_flush_descq,
			     FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
			     FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue);
	efx_writeo(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ);
}

void efx_farch_tx_fini(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t tx_desc_ptr;

	/* Remove TX descriptor ring from card */
	EFX_ZERO_OWORD(tx_desc_ptr);
	efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
			 tx_queue->queue);

	/* Unpin TX descriptor ring */
	efx_fini_special_buffer(efx, &tx_queue->txd);
}

/* Free buffers backing TX queue */
void efx_farch_tx_remove(struct efx_tx_queue *tx_queue)
{
	efx_free_special_buffer(tx_queue->efx, &tx_queue->txd);
}

/**************************************************************************
 *
 * RX path
 *
 **************************************************************************/

/* This creates an entry in the RX descriptor queue */
static inline void
efx_farch_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned index)
{
	struct efx_rx_buffer *rx_buf;
	efx_qword_t *rxd;

	rxd = efx_rx_desc(rx_queue, index);
	rx_buf = efx_rx_buffer(rx_queue, index);
	EFX_POPULATE_QWORD_3(*rxd,
			     FSF_AZ_RX_KER_BUF_SIZE,
			     rx_buf->len -
			     rx_queue->efx->type->rx_buffer_padding,
			     FSF_AZ_RX_KER_BUF_REGION, 0,
			     FSF_AZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
}

/* This writes to the RX_DESC_WPTR register for the specified receive
 * descriptor ring.
 */
void efx_farch_rx_write(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	efx_dword_t reg;
	unsigned write_ptr;

	while (rx_queue->notified_count != rx_queue->added_count) {
		efx_farch_build_rx_desc(
			rx_queue,
			rx_queue->notified_count & rx_queue->ptr_mask);
		++rx_queue->notified_count;
	}

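	/* Ensure the descriptors are visible in host memory before the
	 * write pointer update lets the NIC fetch them (mirrors the
	 * barrier in the TX path).
	 */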
	wmb();
	write_ptr = rx_queue->added_count & rx_queue->ptr_mask;
	EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr);
	efx_writed_page(efx, &reg, FR_AZ_RX_DESC_UPD_DWORD_P0,
			efx_rx_queue_index(rx_queue));
}

int efx_farch_rx_probe(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned entries;

	entries = rx_queue->ptr_mask + 1;
	return efx_alloc_special_buffer(efx, &rx_queue->rxd,
					entries * sizeof(efx_qword_t));
}

void efx_farch_rx_init(struct efx_rx_queue *rx_queue)
{
	efx_oword_t rx_desc_ptr;
	struct efx_nic *efx = rx_queue->efx;
	bool jumbo_en;

	/* For kernel-mode queues in Siena, the JUMBO flag enables scatter. */
	jumbo_en = efx->rx_scatter;

	netif_dbg(efx, hw, efx->net_dev,
		  "RX queue %d ring in special buffers %d-%d\n",
		  efx_rx_queue_index(rx_queue), rx_queue->rxd.index,
		  rx_queue->rxd.index + rx_queue->rxd.entries - 1);

	rx_queue->scatter_n = 0;

	/* Pin RX descriptor ring */
	efx_init_special_buffer(efx, &rx_queue->rxd);

	/* Push RX descriptor ring to card */
	EFX_POPULATE_OWORD_10(rx_desc_ptr,
			      FRF_AZ_RX_ISCSI_DDIG_EN, true,
			      FRF_AZ_RX_ISCSI_HDIG_EN, true,
			      FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
			      FRF_AZ_RX_DESCQ_EVQ_ID,
			      efx_rx_queue_channel(rx_queue)->channel,
			      FRF_AZ_RX_DESCQ_OWNER_ID, 0,
			      FRF_AZ_RX_DESCQ_LABEL,
			      efx_rx_queue_index(rx_queue),
			      FRF_AZ_RX_DESCQ_SIZE,
			      __ffs(rx_queue->rxd.entries),
			      FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ ,
			      FRF_AZ_RX_DESCQ_JUMBO, jumbo_en,
			      FRF_AZ_RX_DESCQ_EN, 1);
	efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
			 efx_rx_queue_index(rx_queue));
}

static void efx_farch_flush_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	efx_oword_t rx_flush_descq;

	EFX_POPULATE_OWORD_2(rx_flush_descq,
			     FRF_AZ_RX_FLUSH_DESCQ_CMD, 1,
			     FRF_AZ_RX_FLUSH_DESCQ,
			     efx_rx_queue_index(rx_queue));
	efx_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ);
}

void efx_farch_rx_fini(struct efx_rx_queue *rx_queue)
{
	efx_oword_t rx_desc_ptr;
	struct efx_nic *efx = rx_queue->efx;

	/* Remove RX descriptor ring from card */
	EFX_ZERO_OWORD(rx_desc_ptr);
	efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
			 efx_rx_queue_index(rx_queue));

	/* Unpin RX descriptor ring */
	efx_fini_special_buffer(efx, &rx_queue->rxd);
}

/* Free buffers backing RX queue */
void efx_farch_rx_remove(struct efx_rx_queue *rx_queue)
{
	efx_free_special_buffer(rx_queue->efx, &rx_queue->rxd);
}

/**************************************************************************
 *
 * Flush handling
 *
 **************************************************************************/

/* efx_farch_flush_queues() must be woken up when all flushes are completed,
 * or more RX flushes can be kicked off.
 */
static bool efx_farch_flush_wake(struct efx_nic *efx)
{
	/* Ensure that all updates are visible to efx_farch_flush_queues() */
	smp_mb();

	return (atomic_read(&efx->active_queues) == 0 ||
		(atomic_read(&efx->rxq_flush_outstanding) < EFX_RX_FLUSH_COUNT
		 && atomic_read(&efx->rxq_flush_pending) > 0));
}

static bool efx_check_tx_flush_complete(struct efx_nic *efx)
{
	bool i = true;
	efx_oword_t txd_ptr_tbl;
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;

	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_tx_queue(tx_queue, channel) {
			efx_reado_table(efx, &txd_ptr_tbl,
					FR_BZ_TX_DESC_PTR_TBL, tx_queue->queue);
			if (EFX_OWORD_FIELD(txd_ptr_tbl,
					    FRF_AZ_TX_DESCQ_FLUSH) ||
			    EFX_OWORD_FIELD(txd_ptr_tbl,
					    FRF_AZ_TX_DESCQ_EN)) {
				netif_dbg(efx, hw, efx->net_dev,
					  "flush did not complete on TXQ %d\n",
					  tx_queue->queue);
				i = false;
			} else if (atomic_cmpxchg(&tx_queue->flush_outstanding,
						  1, 0)) {
				/* The flush is complete, but we didn't
				 * receive a flush completion event
				 */
				netif_dbg(efx, hw, efx->net_dev,
					  "flush complete on TXQ %d, so drain "
					  "the queue\n", tx_queue->queue);
				/* Don't need to increment active_queues as it
				 * has already been incremented for the queues
				 * which did not drain
				 */
				efx_farch_magic_event(channel,
						      EFX_CHANNEL_MAGIC_TX_DRAIN(
							      tx_queue));
			}
		}
	}

	return i;
}

/* Flush all the transmit queues, and continue flushing receive queues until
 * they're all flushed. Wait for the DRAIN events to be received so that there
 * are no more RX and TX events left on any channel. */
static int efx_farch_do_flush(struct efx_nic *efx)
{
	unsigned timeout = msecs_to_jiffies(5000); /* 5s for all flushes and drains */
	struct efx_channel *channel;
	struct efx_rx_queue *rx_queue;
	struct efx_tx_queue *tx_queue;
	int rc = 0;

	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_tx_queue(tx_queue, channel) {
			efx_farch_flush_tx_queue(tx_queue);
		}
		efx_for_each_channel_rx_queue(rx_queue, channel) {
			rx_queue->flush_pending = true;
			atomic_inc(&efx->rxq_flush_pending);
		}
	}

	while (timeout && atomic_read(&efx->active_queues) > 0) {
		/* If SRIOV is enabled, then offload receive queue flushing to
		 * the firmware (though we will still have to poll for
		 * completion). If that fails, fall back to the old scheme.
		 */
		if (efx_siena_sriov_enabled(efx)) {
			rc = efx_mcdi_flush_rxqs(efx);
			if (!rc)
				goto wait;
		}

		/* The hardware supports four concurrent rx flushes, each of
		 * which may need to be retried if there is an outstanding
		 * descriptor fetch
		 */
		efx_for_each_channel(channel, efx) {
			efx_for_each_channel_rx_queue(rx_queue, channel) {
				if (atomic_read(&efx->rxq_flush_outstanding) >=
				    EFX_RX_FLUSH_COUNT)
					break;

				if (rx_queue->flush_pending) {
					rx_queue->flush_pending = false;
					atomic_dec(&efx->rxq_flush_pending);
					atomic_inc(&efx->rxq_flush_outstanding);
					efx_farch_flush_rx_queue(rx_queue);
				}
			}
		}

	wait:
		timeout = wait_event_timeout(efx->flush_wq,
					     efx_farch_flush_wake(efx),
					     timeout);
	}

	if (atomic_read(&efx->active_queues) &&
	    !efx_check_tx_flush_complete(efx)) {
		netif_err(efx, hw, efx->net_dev, "failed to flush %d queues "
			  "(rx %d+%d)\n", atomic_read(&efx->active_queues),
			  atomic_read(&efx->rxq_flush_outstanding),
			  atomic_read(&efx->rxq_flush_pending));
		rc = -ETIMEDOUT;

		atomic_set(&efx->active_queues, 0);
		atomic_set(&efx->rxq_flush_pending, 0);
		atomic_set(&efx->rxq_flush_outstanding, 0);
	}

	return rc;
}

int efx_farch_fini_dmaq(struct efx_nic *efx)
{
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	int rc = 0;

	/* Do not attempt to write to the NIC during EEH recovery */
	if (efx->state != STATE_RECOVERY) {
		/* Only perform flush if DMA is enabled */
		if (efx->pci_dev->is_busmaster) {
			efx->type->prepare_flush(efx);
			rc = efx_farch_do_flush(efx);
			efx->type->finish_flush(efx);
		}

		efx_for_each_channel(channel, efx) {
			efx_for_each_channel_rx_queue(rx_queue, channel)
				efx_farch_rx_fini(rx_queue);
			efx_for_each_channel_tx_queue(tx_queue, channel)
				efx_farch_tx_fini(tx_queue);
		}
	}

	return rc;
}

/* Reset queue and flush accounting after FLR
 *
 * One possible cause of FLR recovery is that DMA may be failing (e.g. if bus
 * mastering was disabled), in which case we don't receive (RXQ) flush
 * completion events.  This means that efx->rxq_flush_outstanding remained at 4
 * after the FLR; also, efx->active_queues was non-zero (as no flush completion
 * events were received, and we didn't go through efx_check_tx_flush_complete()).
 * If we don't fix this up, on the next call to efx_realloc_channels() we won't
 * flush any RX queues because efx->rxq_flush_outstanding is at the limit of 4
 * for batched flush requests; and efx->active_queues gets messed up because
 * we keep incrementing for the newly initialised queues, but it never went to
 * zero previously.  Then we get a timeout every time we try to restart the
 * queues, as it doesn't go back to zero when we should be flushing the queues.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) void efx_farch_finish_flr(struct efx_nic *efx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 	atomic_set(&efx->rxq_flush_pending, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 	atomic_set(&efx->rxq_flush_outstanding, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 	atomic_set(&efx->active_queues, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) /**************************************************************************
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766)  * Event queue processing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767)  * Event queues are processed by per-channel tasklets.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769)  **************************************************************************/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) /* Update a channel's event queue's read pointer (RPTR) register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773)  * This writes the EVQ_RPTR_REG register for the specified channel's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774)  * event queue.
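^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300     )  * The stored read pointer is free-running; it is masked with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300     )  * eventq_mask before being written back to the register.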
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) void efx_farch_ev_read_ack(struct efx_channel *channel)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 	efx_dword_t reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 	struct efx_nic *efx = channel->efx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 	EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 			     channel->eventq_read_ptr & channel->eventq_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 	/* For Falcon A1, EVQ_RPTR_KER is documented as having a step size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 	 * of 4 bytes, but it is really 16 bytes just like later revisions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 	efx_writed(efx, &reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 		   efx->type->evq_rptr_tbl_base +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 		   FR_BZ_EVQ_RPTR_STEP * channel->channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) /* Use HW to insert a SW defined event */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) void efx_farch_generate_event(struct efx_nic *efx, unsigned int evq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 			      efx_qword_t *event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 	efx_oword_t drv_ev_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 	BUILD_BUG_ON(FRF_AZ_DRV_EV_DATA_LBN != 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 		     FRF_AZ_DRV_EV_DATA_WIDTH != 64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 	drv_ev_reg.u32[0] = event->u32[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 	drv_ev_reg.u32[1] = event->u32[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 	drv_ev_reg.u32[2] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 	drv_ev_reg.u32[3] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 	EFX_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, evq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 	efx_writeo(efx, &drv_ev_reg, FR_AZ_DRV_EV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 
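^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300     ) /* Inject a driver-generated "magic" event on a channel.  The magic value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300     )  * encodes the channel (or queue) together with a code, and is decoded
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300     )  * again in efx_farch_handle_generated_event().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300     )  */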
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) static void efx_farch_magic_event(struct efx_channel *channel, u32 magic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 	efx_qword_t event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 	EFX_POPULATE_QWORD_2(event, FSF_AZ_EV_CODE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 			     FSE_AZ_EV_CODE_DRV_GEN_EV,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 			     FSF_AZ_DRV_GEN_EV_MAGIC, magic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 	efx_farch_generate_event(channel->efx, channel->channel, &event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) /* Handle a transmit completion event
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820)  * The NIC batches TX completion events; the message we receive is of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821)  * the form "complete all TX events up to this index".
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) efx_farch_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 	unsigned int tx_ev_desc_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 	unsigned int tx_ev_q_label;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 	struct efx_tx_queue *tx_queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 	struct efx_nic *efx = channel->efx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 	if (unlikely(READ_ONCE(efx->reset_pending)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 	if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 		/* Transmit completion */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 		tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 		tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
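^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300     ) 		/* The label is the TX queue's index within its channel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300     ) 		 * the modulo keeps an out-of-range label in bounds.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300     ) 		 */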
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 		tx_queue = channel->tx_queue +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 				(tx_ev_q_label % EFX_MAX_TXQ_PER_CHANNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 		efx_xmit_done(tx_queue, tx_ev_desc_ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 	} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 		/* Rewrite the FIFO write pointer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 		tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 		tx_queue = channel->tx_queue +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 				(tx_ev_q_label % EFX_MAX_TXQ_PER_CHANNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 		netif_tx_lock(efx->net_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 		efx_farch_notify_tx_desc(tx_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 		netif_tx_unlock(efx->net_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 	} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 		efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 		netif_err(efx, tx_err, efx->net_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 			  "channel %d unexpected TX event "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 			  EFX_QWORD_FMT"\n", channel->channel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 			  EFX_QWORD_VAL(*event));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) /* Decode the errors reported when the RX_EV_PKT_OK bit is clear. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) static u16 efx_farch_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 				      const efx_qword_t *event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 	struct efx_nic *efx = rx_queue->efx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 	bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 	bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 	bool rx_ev_frm_trunc, rx_ev_tobe_disc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 	bool rx_ev_other_err, rx_ev_pause_frm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 	rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 	rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 						 FSF_AZ_RX_EV_BUF_OWNER_ID_ERR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 	rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 						  FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 	rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 						   FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 	rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_ETH_CRC_ERR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 	rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_FRM_TRUNC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 	rx_ev_pause_frm = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 	/* Every error apart from tobe_disc and pause_frm */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 	rx_ev_other_err = (rx_ev_tcp_udp_chksum_err |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 			   rx_ev_buf_owner_id_err | rx_ev_eth_crc_err |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 			   rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 	/* Count errors that are not in MAC stats.  Ignore expected
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 	 * checksum errors during self-test. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 	if (rx_ev_frm_trunc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 		++channel->n_rx_frm_trunc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 	else if (rx_ev_tobe_disc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 		++channel->n_rx_tobe_disc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 	else if (!efx->loopback_selftest) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 		if (rx_ev_ip_hdr_chksum_err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 			++channel->n_rx_ip_hdr_chksum_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 		else if (rx_ev_tcp_udp_chksum_err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 			++channel->n_rx_tcp_udp_chksum_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 	/* TOBE_DISC is expected on unicast mismatches; don't print out an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 	 * error message.  FRM_TRUNC indicates RXDP dropped the packet due
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 	 * to a FIFO overflow.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) #ifdef DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 	if (rx_ev_other_err && net_ratelimit()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 		netif_dbg(efx, rx_err, efx->net_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 			  "RX queue %d unexpected RX event "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 			  EFX_QWORD_FMT "%s%s%s%s%s%s%s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 			  efx_rx_queue_index(rx_queue), EFX_QWORD_VAL(*event),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 			  rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 			  rx_ev_ip_hdr_chksum_err ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 			  " [IP_HDR_CHKSUM_ERR]" : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 			  rx_ev_tcp_udp_chksum_err ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 			  " [TCP_UDP_CHKSUM_ERR]" : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 			  rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 			  rx_ev_frm_trunc ? " [FRM_TRUNC]" : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 			  rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 			  rx_ev_pause_frm ? " [PAUSE]" : "");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 	(void) rx_ev_other_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 	if (efx->net_dev->features & NETIF_F_RXALL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 		/* don't discard frame for CRC error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 		rx_ev_eth_crc_err = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 	/* The frame must be discarded if any of these are true. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 	return (rx_ev_eth_crc_err | rx_ev_frm_trunc |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 		rx_ev_tobe_disc | rx_ev_pause_frm) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 		EFX_RX_PKT_DISCARD : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) /* Handle receive events that are not in-order. Return true if this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935)  * can be handled as a partial packet discard, false if it's more
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936)  * serious.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) static bool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) efx_farch_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 	struct efx_nic *efx = rx_queue->efx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 	unsigned expected, dropped;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 
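^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300     ) 	/* A scattered packet truncated for lack of descriptors completes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300     ) 	 * with an event pointing at the last descriptor actually written;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300     ) 	 * treat this as a partial discard rather than a serious error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300     ) 	 */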
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 	if (rx_queue->scatter_n &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 	    index == ((rx_queue->removed_count + rx_queue->scatter_n - 1) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 		      rx_queue->ptr_mask)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 		++channel->n_rx_nodesc_trunc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 	expected = rx_queue->removed_count & rx_queue->ptr_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 	dropped = (index - expected) & rx_queue->ptr_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 	netif_info(efx, rx_err, efx->net_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 		   "dropped %d events (index=%d expected=%d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 		   dropped, index, expected);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 	efx_schedule_reset(efx, RESET_TYPE_DISABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) /* Handle a packet received event
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964)  * The NIC gives a "discard" flag if it's a unicast packet with the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965)  * wrong destination address.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966)  * The "is multicast" and "matches multicast filter" flags can also be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967)  * used to discard non-matching multicast packets.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) efx_farch_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 	unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 	unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 	unsigned expected_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 	bool rx_ev_pkt_ok, rx_ev_sop, rx_ev_cont;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 	u16 flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 	struct efx_rx_queue *rx_queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 	struct efx_nic *efx = channel->efx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 	if (unlikely(READ_ONCE(efx->reset_pending)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 	rx_ev_cont = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 	rx_ev_sop = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 	WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 		channel->channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 	rx_queue = efx_channel_get_rx_queue(channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 	rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 	expected_ptr = ((rx_queue->removed_count + rx_queue->scatter_n) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 			rx_queue->ptr_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 	/* Check for partial drops and other errors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 	if (unlikely(rx_ev_desc_ptr != expected_ptr) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 	    unlikely(rx_ev_sop != (rx_queue->scatter_n == 0))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 		if (rx_ev_desc_ptr != expected_ptr &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 		    !efx_farch_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 		/* Discard all pending fragments */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 		if (rx_queue->scatter_n) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 			efx_rx_packet(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 				rx_queue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 				rx_queue->removed_count & rx_queue->ptr_mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 				rx_queue->scatter_n, 0, EFX_RX_PKT_DISCARD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 			rx_queue->removed_count += rx_queue->scatter_n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 			rx_queue->scatter_n = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 		/* Return if there is no new fragment */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 		if (rx_ev_desc_ptr != expected_ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 		/* Discard new fragment if not SOP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 		if (!rx_ev_sop) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 			efx_rx_packet(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 				rx_queue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 				rx_queue->removed_count & rx_queue->ptr_mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 				1, 0, EFX_RX_PKT_DISCARD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 			++rx_queue->removed_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 	++rx_queue->scatter_n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 	if (rx_ev_cont)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 	rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 	rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 	rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 	if (likely(rx_ev_pkt_ok)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 		/* If the packet is marked as OK then we can rely on the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 		 * hardware checksum and classification.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 		flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 		switch (rx_ev_hdr_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 		case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 			flags |= EFX_RX_PKT_TCP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 			fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 		case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 			flags |= EFX_RX_PKT_CSUMMED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 			fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 		case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_OTHER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 		case FSE_AZ_RX_EV_HDR_TYPE_OTHER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 		flags = efx_farch_handle_rx_not_ok(rx_queue, event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 	/* Detect multicast packets that didn't match the filter */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 	rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 	if (rx_ev_mcast_pkt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 		unsigned int rx_ev_mcast_hash_match =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 			EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 		if (unlikely(!rx_ev_mcast_hash_match)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 			++channel->n_rx_mcast_mismatch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 			flags |= EFX_RX_PKT_DISCARD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 	channel->irq_mod_score += 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 	/* Handle received packet */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 	efx_rx_packet(rx_queue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 		      rx_queue->removed_count & rx_queue->ptr_mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 		      rx_queue->scatter_n, rx_ev_byte_cnt, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 	rx_queue->removed_count += rx_queue->scatter_n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 	rx_queue->scatter_n = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) /* If this flush done event corresponds to a &struct efx_tx_queue, then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077)  * send an %EFX_CHANNEL_MAGIC_TX_DRAIN event to drain the event queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078)  * of all transmit completions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) efx_farch_handle_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 	struct efx_tx_queue *tx_queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 	struct efx_channel *channel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 	int qid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 
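^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300     ) 	/* The event's subdata is the absolute TX queue number, i.e.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300     ) 	 * channel * EFX_MAX_TXQ_PER_CHANNEL + queue; decode both parts.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300     ) 	 */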
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 	qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 	if (qid < EFX_MAX_TXQ_PER_CHANNEL * (efx->n_tx_channels + efx->n_extra_tx_channels)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 		channel = efx_get_tx_channel(efx, qid / EFX_MAX_TXQ_PER_CHANNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 		tx_queue = channel->tx_queue + (qid % EFX_MAX_TXQ_PER_CHANNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 		if (atomic_cmpxchg(&tx_queue->flush_outstanding, 1, 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 			efx_farch_magic_event(tx_queue->channel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 					      EFX_CHANNEL_MAGIC_TX_DRAIN(tx_queue));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) /* If this flush done event corresponds to a &struct efx_rx_queue: If the flush
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098)  * was successful then send an %EFX_CHANNEL_MAGIC_RX_DRAIN, otherwise add
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099)  * the RX queue back to the mask of RX queues in need of flushing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) efx_farch_handle_rx_flush_done(struct efx_nic *efx, efx_qword_t *event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 	struct efx_channel *channel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 	struct efx_rx_queue *rx_queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 	int qid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 	bool failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 
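^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300     ) 	/* On this architecture the PF's RX queue numbers match channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300     ) 	 * numbers, so the descriptor queue ID doubles as a channel index.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300     ) 	 */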
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 	qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 	failed = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 	if (qid >= efx->n_channels)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 	channel = efx_get_channel(efx, qid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 	if (!efx_channel_has_rx_queue(channel))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 	rx_queue = efx_channel_get_rx_queue(channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 	if (failed) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 		netif_info(efx, hw, efx->net_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 			   "RXQ %d flush retry\n", qid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 		rx_queue->flush_pending = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 		atomic_inc(&efx->rxq_flush_pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 		efx_farch_magic_event(efx_rx_queue_channel(rx_queue),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 				      EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 	atomic_dec(&efx->rxq_flush_outstanding);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 	if (efx_farch_flush_wake(efx))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 		wake_up(&efx->flush_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) efx_farch_handle_drain_event(struct efx_channel *channel)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 	struct efx_nic *efx = channel->efx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 	WARN_ON(atomic_read(&efx->active_queues) == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 	atomic_dec(&efx->active_queues);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 	if (efx_farch_flush_wake(efx))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 		wake_up(&efx->flush_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) static void efx_farch_handle_generated_event(struct efx_channel *channel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 					     efx_qword_t *event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 	struct efx_nic *efx = channel->efx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 	struct efx_rx_queue *rx_queue =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 		efx_channel_has_rx_queue(channel) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 		efx_channel_get_rx_queue(channel) : NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 	unsigned magic, code;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 	magic = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 	code = _EFX_CHANNEL_MAGIC_CODE(magic);
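^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300     ) 	/* TEST, FILL and RX_DRAIN are matched on the full magic value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300     ) 	 * TX_DRAIN is matched on its code alone, as the magic also encodes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300     ) 	 * which of the channel's TX queues was drained.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300     ) 	 */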
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 	if (magic == EFX_CHANNEL_MAGIC_TEST(channel)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 		channel->event_test_cpu = raw_smp_processor_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 	} else if (rx_queue && magic == EFX_CHANNEL_MAGIC_FILL(rx_queue)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 		/* The queue must be empty, so we won't receive any RX
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 		 * events and efx_process_channel() won't refill the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 		 * queue.  Refill it here. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 		efx_fast_push_rx_descriptors(rx_queue, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 	} else if (rx_queue && magic == EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 		efx_farch_handle_drain_event(channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 	} else if (code == _EFX_CHANNEL_MAGIC_TX_DRAIN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 		efx_farch_handle_drain_event(channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 		netif_dbg(efx, hw, efx->net_dev, "channel %d received "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 			  "generated event "EFX_QWORD_FMT"\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 			  channel->channel, EFX_QWORD_VAL(*event));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) efx_farch_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 	struct efx_nic *efx = channel->efx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 	unsigned int ev_sub_code;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 	unsigned int ev_sub_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 	ev_sub_code = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBCODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 	ev_sub_data = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 	switch (ev_sub_code) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 	case FSE_AZ_TX_DESCQ_FLS_DONE_EV:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 		netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 			   channel->channel, ev_sub_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 		efx_farch_handle_tx_flush_done(efx, event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) #ifdef CONFIG_SFC_SRIOV
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 		efx_siena_sriov_tx_flush_done(efx, event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 	case FSE_AZ_RX_DESCQ_FLS_DONE_EV:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 		netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 			   channel->channel, ev_sub_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 		efx_farch_handle_rx_flush_done(efx, event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) #ifdef CONFIG_SFC_SRIOV
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 		efx_siena_sriov_rx_flush_done(efx, event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 	case FSE_AZ_EVQ_INIT_DONE_EV:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 		netif_dbg(efx, hw, efx->net_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 			  "channel %d EVQ %d initialised\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 			  channel->channel, ev_sub_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 	case FSE_AZ_SRM_UPD_DONE_EV:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 		netif_vdbg(efx, hw, efx->net_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 			   "channel %d SRAM update done\n", channel->channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 	case FSE_AZ_WAKE_UP_EV:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 		netif_vdbg(efx, hw, efx->net_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 			   "channel %d RXQ %d wakeup event\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 			   channel->channel, ev_sub_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 	case FSE_AZ_TIMER_EV:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 		netif_vdbg(efx, hw, efx->net_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 			   "channel %d RX queue %d timer expired\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 			   channel->channel, ev_sub_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 	case FSE_AA_RX_RECOVER_EV:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 		netif_err(efx, rx_err, efx->net_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 			  "channel %d saw DRIVER RX_RESET event. "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 			  "Resetting.\n", channel->channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 		atomic_inc(&efx->rx_reset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 		efx_schedule_reset(efx, RESET_TYPE_DISABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 	case FSE_BZ_RX_DSC_ERROR_EV:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 		if (ev_sub_data < EFX_VI_BASE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 			netif_err(efx, rx_err, efx->net_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 				  "RX DMA Q %d reports descriptor fetch error."
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 				  " RX Q %d is disabled.\n", ev_sub_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 				  ev_sub_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 			efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) #ifdef CONFIG_SFC_SRIOV
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 			efx_siena_sriov_desc_fetch_err(efx, ev_sub_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 	case FSE_BZ_TX_DSC_ERROR_EV:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 		if (ev_sub_data < EFX_VI_BASE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 			netif_err(efx, tx_err, efx->net_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 				  "TX DMA Q %d reports descriptor fetch error."
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 				  " TX Q %d is disabled.\n", ev_sub_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 				  ev_sub_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 			efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) #ifdef CONFIG_SFC_SRIOV
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 			efx_siena_sriov_desc_fetch_err(efx, ev_sub_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 		netif_vdbg(efx, hw, efx->net_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 			   "channel %d unknown driver event code %d "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 			   "data %04x\n", channel->channel, ev_sub_code,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 			   ev_sub_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) int efx_farch_ev_process(struct efx_channel *channel, int budget)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 	struct efx_nic *efx = channel->efx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 	unsigned int read_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 	efx_qword_t event, *p_event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 	int ev_code;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 	int spent = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 	if (budget <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 		return spent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 	read_ptr = channel->eventq_read_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 
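^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300     ) 	/* read_ptr is free-running; efx_event() masks it with eventq_mask
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300     ) 	 * when indexing into the event ring.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300     ) 	 */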
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 	for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 		p_event = efx_event(channel, read_ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 		event = *p_event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 		if (!efx_event_present(&event))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 			/* End of events */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 		netif_vdbg(channel->efx, intr, channel->efx->net_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 			   "channel %d event is "EFX_QWORD_FMT"\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 			   channel->channel, EFX_QWORD_VAL(event));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 		/* Clear this event by marking it all ones */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 		EFX_SET_QWORD(*p_event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 		++read_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 		ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 		switch (ev_code) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 		case FSE_AZ_EV_CODE_RX_EV:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 			efx_farch_handle_rx_event(channel, &event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 			if (++spent == budget)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 		case FSE_AZ_EV_CODE_TX_EV:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 			efx_farch_handle_tx_event(channel, &event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 		case FSE_AZ_EV_CODE_DRV_GEN_EV:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 			efx_farch_handle_generated_event(channel, &event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 		case FSE_AZ_EV_CODE_DRIVER_EV:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 			efx_farch_handle_driver_event(channel, &event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) #ifdef CONFIG_SFC_SRIOV
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 		case FSE_CZ_EV_CODE_USER_EV:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 			efx_siena_sriov_event(channel, &event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 		case FSE_CZ_EV_CODE_MCDI_EV:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 			efx_mcdi_process_event(channel, &event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 		case FSE_AZ_EV_CODE_GLOBAL_EV:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 			if (efx->type->handle_global_event &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 			    efx->type->handle_global_event(channel, &event))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 			fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 			netif_err(channel->efx, hw, channel->efx->net_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 				  "channel %d unknown event type %d (data "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 				  EFX_QWORD_FMT ")\n", channel->channel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 				  ev_code, EFX_QWORD_VAL(event));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 	channel->eventq_read_ptr = read_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 	return spent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) /* Allocate buffer table entries for event queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) int efx_farch_ev_probe(struct efx_channel *channel)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 	struct efx_nic *efx = channel->efx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 	unsigned entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 	entries = channel->eventq_mask + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 	return efx_alloc_special_buffer(efx, &channel->eventq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 					entries * sizeof(efx_qword_t));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) int efx_farch_ev_init(struct efx_channel *channel)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 	efx_oword_t reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 	struct efx_nic *efx = channel->efx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 	netif_dbg(efx, hw, efx->net_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 		  "channel %d event queue in special buffers %d-%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 		  channel->channel, channel->eventq.index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 		  channel->eventq.index + channel->eventq.entries - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 	EFX_POPULATE_OWORD_3(reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 			     FRF_CZ_TIMER_Q_EN, 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 			     FRF_CZ_HOST_NOTIFY_MODE, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 			     FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 	efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 	/* Pin event queue buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 	efx_init_special_buffer(efx, &channel->eventq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 	/* Fill event queue with all ones (i.e. empty events) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 	memset(channel->eventq.buf.addr, 0xff, channel->eventq.buf.len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 
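^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300     ) 	/* FRF_AZ_EVQ_SIZE is the log2 of the entry count; entries is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300     ) 	 * always a power of two, so __ffs() yields exactly that.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300     ) 	 */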
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 	/* Push event queue to card */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 	EFX_POPULATE_OWORD_3(reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 			     FRF_AZ_EVQ_EN, 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 			     FRF_AZ_EVQ_SIZE, __ffs(channel->eventq.entries),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 			     FRF_AZ_EVQ_BUF_BASE_ID, channel->eventq.index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 	efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 			 channel->channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) void efx_farch_ev_fini(struct efx_channel *channel)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 	efx_oword_t reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 	struct efx_nic *efx = channel->efx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 	/* Remove event queue from card */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 	EFX_ZERO_OWORD(reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 	efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 			 channel->channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 	efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 	/* Unpin event queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 	efx_fini_special_buffer(efx, &channel->eventq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) /* Free buffers backing event queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) void efx_farch_ev_remove(struct efx_channel *channel)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 	efx_free_special_buffer(channel->efx, &channel->eventq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) void efx_farch_ev_test_generate(struct efx_channel *channel)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 	efx_farch_magic_event(channel, EFX_CHANNEL_MAGIC_TEST(channel));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) void efx_farch_rx_defer_refill(struct efx_rx_queue *rx_queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 	efx_farch_magic_event(efx_rx_queue_channel(rx_queue),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 			      EFX_CHANNEL_MAGIC_FILL(rx_queue));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) /**************************************************************************
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413)  * Hardware interrupts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414)  * The hardware interrupt handler does very little work; all the event
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415)  * queue processing is carried out by per-channel tasklets.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417)  **************************************************************************/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) /* Enable/disable/generate interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) static inline void efx_farch_interrupts(struct efx_nic *efx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) 					 bool enabled, bool force)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) 	efx_oword_t int_en_reg_ker;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) 
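^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300     ) 	/* FRF_AZ_KER_INT_LEVE_SEL selects which interrupt level the NIC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300     ) 	 * signals on; FRF_AZ_KER_INT_KER forces a test interrupt when set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300     ) 	 */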
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 	EFX_POPULATE_OWORD_3(int_en_reg_ker,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) 			     FRF_AZ_KER_INT_LEVE_SEL, efx->irq_level,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) 			     FRF_AZ_KER_INT_KER, force,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 			     FRF_AZ_DRV_INT_EN_KER, enabled);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 	efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) void efx_farch_irq_enable_master(struct efx_nic *efx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) {
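^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300     ) 	/* Clear the interrupt status block that the NIC DMAs into host
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300     ) 	 * memory, so a stale vector is not mistaken for a new interrupt.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300     ) 	 */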
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 	EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 	wmb(); /* Ensure interrupt vector is clear before interrupts enabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 	efx_farch_interrupts(efx, true, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) void efx_farch_irq_disable_master(struct efx_nic *efx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 	/* Disable interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 	efx_farch_interrupts(efx, false, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) /* Generate a test interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447)  * Interrupts must already have been enabled, otherwise nasty things
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448)  * may happen.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) int efx_farch_irq_test_generate(struct efx_nic *efx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 	efx_farch_interrupts(efx, true, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) /* Process a fatal interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457)  * Disable bus mastering ASAP and schedule a reset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 	efx_oword_t *int_ker = efx->irq_status.addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 	efx_oword_t fatal_intr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 	int error, mem_perr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 	efx_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) 	error = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 	netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR "EFX_OWORD_FMT" status "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) 		  EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 		  EFX_OWORD_VAL(fatal_intr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) 		  error ? "disabling bus mastering" : "no recognised error");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) 	/* If this is a memory parity error, dump which blocks are offending */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) 	mem_perr = (EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 		    EFX_OWORD_FIELD(fatal_intr, FRF_AZ_SRM_PERR_INT_KER));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) 	if (mem_perr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 		efx_oword_t reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) 		efx_reado(efx, &reg, FR_AZ_MEM_STAT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) 		netif_err(efx, hw, efx->net_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 			  "SYSTEM ERROR: memory parity error "EFX_OWORD_FMT"\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) 			  EFX_OWORD_VAL(reg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) 	/* Disable bus mastering and interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) 	pci_clear_master(efx->pci_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 	efx_farch_irq_disable_master(efx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) 	/* Count errors and reset or disable the NIC accordingly */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) 	if (efx->int_error_count == 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) 	    time_after(jiffies, efx->int_error_expire)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) 		efx->int_error_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 		efx->int_error_expire =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 			jiffies + EFX_INT_ERROR_EXPIRE * HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 	if (++efx->int_error_count < EFX_MAX_INT_ERRORS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) 		netif_err(efx, hw, efx->net_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 			  "SYSTEM ERROR - reset scheduled\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) 		efx_schedule_reset(efx, RESET_TYPE_INT_ERROR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 		netif_err(efx, hw, efx->net_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) 			  "SYSTEM ERROR - max number of errors seen. "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) 			  "NIC will be disabled\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) 		efx_schedule_reset(efx, RESET_TYPE_DISABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) 	return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) }
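
/* For scale (values assumed from this tree's nic.h): with
 * EFX_INT_ERROR_EXPIRE = 3600 and EFX_MAX_INT_ERRORS = 5, at most four
 * INT_ERROR recovery resets are attempted within a one-hour window
 * before the NIC is disabled outright.
 */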
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) /* Handle a legacy interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510)  * Acknowledges the interrupt and schedules event queue processing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) irqreturn_t efx_farch_legacy_interrupt(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 	struct efx_nic *efx = dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) 	bool soft_enabled = READ_ONCE(efx->irq_soft_enabled);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) 	efx_oword_t *int_ker = efx->irq_status.addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) 	irqreturn_t result = IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) 	struct efx_channel *channel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) 	efx_dword_t reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) 	u32 queues;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) 	int syserr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) 	/* Read the ISR which also ACKs the interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) 	efx_readd(efx, &reg, FR_BZ_INT_ISR0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) 	queues = EFX_EXTRACT_DWORD(reg, 0, 31);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) 	/* Legacy interrupts are disabled too late by the EEH kernel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) 	 * code. Disable them earlier.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) 	 * If an EEH error occurred, the read will have returned all ones.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) 	if (EFX_DWORD_IS_ALL_ONES(reg) && efx_try_recovery(efx) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) 	    !efx->eeh_disabled_legacy_irq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) 		disable_irq_nosync(efx->legacy_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) 		efx->eeh_disabled_legacy_irq = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) 	/* Handle non-event-queue sources */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) 	if (queues & (1U << efx->irq_level) && soft_enabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) 		syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) 		if (unlikely(syserr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) 			return efx_farch_fatal_interrupt(efx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) 		efx->last_irq_cpu = raw_smp_processor_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) 	if (queues != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) 		efx->irq_zero_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) 		/* Schedule processing of any interrupting queues */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) 		if (likely(soft_enabled)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) 			efx_for_each_channel(channel, efx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) 				if (queues & 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) 					efx_schedule_channel_irq(channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) 				queues >>= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) 		result = IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) 		efx_qword_t *event;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) 		/* Legacy ISR read can return zero once (SF bug 15783) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) 		/* We can't return IRQ_HANDLED more than once on seeing ISR=0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) 		 * because this might be a shared interrupt. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) 		if (efx->irq_zero_count++ == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) 			result = IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) 		/* Ensure we schedule or rearm all event queues */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) 		if (likely(soft_enabled)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) 			efx_for_each_channel(channel, efx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) 				event = efx_event(channel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) 						  channel->eventq_read_ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) 				if (efx_event_present(event))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) 					efx_schedule_channel_irq(channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) 				else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) 					efx_farch_ev_read_ack(channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) 	if (result == IRQ_HANDLED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) 		netif_vdbg(efx, intr, efx->net_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) 			   "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) 			   irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) 	return result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) }
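
/* A note on the IRQ_NONE/IRQ_HANDLED distinction above: a legacy (INTx)
 * line may be shared with another device, so we must not claim interrupts
 * we did not raise; but never claiming the occasional ISR=0 read (SF bug
 * 15783 above) risks tripping the kernel's spurious-IRQ detection, which
 * can disable the line.  Hence only the first of a run of zero reads
 * returns IRQ_HANDLED.
 */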
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) /* Handle an MSI interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591)  * Handle an MSI hardware interrupt.  This routine schedules event
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592)  * queue processing.  No interrupt acknowledgement cycle is necessary.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593)  * Also, we never need to check that the interrupt is for us, since
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594)  * MSI interrupts cannot be shared.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) irqreturn_t efx_farch_msi_interrupt(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) 	struct efx_msi_context *context = dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) 	struct efx_nic *efx = context->efx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) 	efx_oword_t *int_ker = efx->irq_status.addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) 	int syserr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) 	netif_vdbg(efx, intr, efx->net_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) 		   "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) 		   irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) 	if (unlikely(!READ_ONCE(efx->irq_soft_enabled)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) 		return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) 	/* Handle non-event-queue sources */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) 	if (context->index == efx->irq_level) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) 		syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) 		if (unlikely(syserr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) 			return efx_farch_fatal_interrupt(efx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) 		efx->last_irq_cpu = raw_smp_processor_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) 	/* Schedule processing of the channel */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) 	efx_schedule_channel_irq(efx->channel[context->index]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) 	return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) /* Set up the RSS indirection table.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625)  * This maps from the packet's hash value to an RX queue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) void efx_farch_rx_push_indir_table(struct efx_nic *efx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) 	size_t i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) 	efx_dword_t dword;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) 	BUILD_BUG_ON(ARRAY_SIZE(efx->rss_context.rx_indir_table) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) 		     FR_BZ_RX_INDIRECTION_TBL_ROWS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) 	for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) 		EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) 				     efx->rss_context.rx_indir_table[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) 		efx_writed(efx, &dword,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) 			   FR_BZ_RX_INDIRECTION_TBL +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) 			   FR_BZ_RX_INDIRECTION_TBL_STEP * i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) void efx_farch_rx_pull_indir_table(struct efx_nic *efx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) 	size_t i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) 	efx_dword_t dword;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) 	BUILD_BUG_ON(ARRAY_SIZE(efx->rss_context.rx_indir_table) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) 		     FR_BZ_RX_INDIRECTION_TBL_ROWS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) 	for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) 		efx_readd(efx, &dword,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) 			   FR_BZ_RX_INDIRECTION_TBL +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) 			   FR_BZ_RX_INDIRECTION_TBL_STEP * i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) 		efx->rss_context.rx_indir_table[i] = EFX_DWORD_FIELD(dword, FRF_BZ_IT_QUEUE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) }
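
/* Illustrative sketch (not part of this driver): the table pushed above
 * is normally pre-filled elsewhere (efx_set_default_rx_indir_table())
 * with an even spread across the RX channels.  A minimal equivalent
 * using the standard ethtool helper, which computes i % n_rx_channels:
 */
static inline void example_rx_indir_default_spread(struct efx_nic *efx)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(efx->rss_context.rx_indir_table); i++)
		efx->rss_context.rx_indir_table[i] =
			ethtool_rxfh_indir_default(i, efx->n_rx_channels);
}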
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) /* Looks at available SRAM resources and works out how many queues we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661)  * can support, and where things like descriptor caches should live.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663)  * SRAM is split up as follows:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664)  * 0                          buftbl entries for channels
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665)  * efx->vf_buftbl_base        buftbl entries for SR-IOV
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666)  * efx->rx_dc_base            RX descriptor caches
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667)  * efx->tx_dc_base            TX descriptor caches
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) 	unsigned vi_count, buftbl_min, total_tx_channels;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) #ifdef CONFIG_SFC_SRIOV
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) 	struct siena_nic_data *nic_data = efx->nic_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) 	total_tx_channels = efx->n_tx_channels + efx->n_extra_tx_channels;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) 	/* Account for the buffer table entries backing the datapath channels
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) 	 * and the descriptor caches for those channels.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) 	buftbl_min = ((efx->n_rx_channels * EFX_MAX_DMAQ_SIZE +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) 		       total_tx_channels * EFX_MAX_TXQ_PER_CHANNEL * EFX_MAX_DMAQ_SIZE +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) 		       efx->n_channels * EFX_MAX_EVQ_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) 		      * sizeof(efx_qword_t) / EFX_BUF_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) 	vi_count = max(efx->n_channels, total_tx_channels * EFX_MAX_TXQ_PER_CHANNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) #ifdef CONFIG_SFC_SRIOV
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) 	if (efx->type->sriov_wanted) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) 		if (efx->type->sriov_wanted(efx)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) 			unsigned vi_dc_entries, buftbl_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) 			unsigned entries_per_vf, vf_limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) 			nic_data->vf_buftbl_base = buftbl_min;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) 			vi_dc_entries = RX_DC_ENTRIES + TX_DC_ENTRIES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) 			vi_count = max(vi_count, EFX_VI_BASE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) 			buftbl_free = (sram_lim_qw - buftbl_min -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) 				       vi_count * vi_dc_entries);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) 			entries_per_vf = ((vi_dc_entries +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) 					   EFX_VF_BUFTBL_PER_VI) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) 					  efx_vf_size(efx));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) 			vf_limit = min(buftbl_free / entries_per_vf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) 				       (1024U - EFX_VI_BASE) >> efx->vi_scale);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) 			if (efx->vf_count > vf_limit) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) 				netif_err(efx, probe, efx->net_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) 					  "Reducing VF count from %d to %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) 					  efx->vf_count, vf_limit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) 				efx->vf_count = vf_limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) 			vi_count += efx->vf_count * efx_vf_size(efx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) 	efx->tx_dc_base = sram_lim_qw - vi_count * TX_DC_ENTRIES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) 	efx->rx_dc_base = efx->tx_dc_base - vi_count * RX_DC_ENTRIES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) }
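
/* Worked example of the carve-up above (illustrative numbers only):
 * with sram_lim_qw = 0x10000 and vi_count = 32,
 *   tx_dc_base = 0x10000 - 32 * TX_DC_ENTRIES(16) = 0xfe00
 *   rx_dc_base = 0xfe00  - 32 * RX_DC_ENTRIES(64) = 0xf600
 * leaving qwords 0 .. 0xf5ff for buffer table entries, with any SR-IOV
 * region starting at vf_buftbl_base = buftbl_min.
 */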
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) u32 efx_farch_fpga_ver(struct efx_nic *efx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) 	efx_oword_t altera_build;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) 	efx_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) 	return EFX_OWORD_FIELD(altera_build, FRF_AZ_ALTERA_BUILD_VER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) void efx_farch_init_common(struct efx_nic *efx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) 	efx_oword_t temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) 	/* Set positions of descriptor caches in SRAM. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) 	EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR, efx->tx_dc_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) 	efx_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) 	EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR, efx->rx_dc_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) 	efx_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) 	/* Set TX descriptor cache size. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) 	BUILD_BUG_ON(TX_DC_ENTRIES != (8 << TX_DC_ENTRIES_ORDER));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) 	EFX_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) 	efx_writeo(efx, &temp, FR_AZ_TX_DC_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) 	/* Set RX descriptor cache size.  Set low watermark to size-8, as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) 	 * this allows the most efficient prefetching.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) 	BUILD_BUG_ON(RX_DC_ENTRIES != (8 << RX_DC_ENTRIES_ORDER));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) 	EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_SIZE, RX_DC_ENTRIES_ORDER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) 	efx_writeo(efx, &temp, FR_AZ_RX_DC_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) 	EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) 	efx_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) 	/* Program INT_KER address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) 	EFX_POPULATE_OWORD_2(temp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) 			     FRF_AZ_NORM_INT_VEC_DIS_KER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) 			     EFX_INT_MODE_USE_MSI(efx),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) 			     FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) 	efx_writeo(efx, &temp, FR_AZ_INT_ADR_KER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) 	if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) 		/* Use an interrupt level unused by event queues */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) 		efx->irq_level = 0x1f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) 		/* Use a valid MSI-X vector */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) 		efx->irq_level = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) 	/* Enable all the genuinely fatal interrupts.  (They are still
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) 	 * masked by the overall interrupt mask, controlled by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) 	 * efx_farch_interrupts()).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) 	 * Note: All other fatal interrupts are enabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) 	EFX_POPULATE_OWORD_3(temp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) 			     FRF_AZ_ILL_ADR_INT_KER_EN, 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) 			     FRF_AZ_RBUF_OWN_INT_KER_EN, 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) 			     FRF_AZ_TBUF_OWN_INT_KER_EN, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) 	EFX_SET_OWORD_FIELD(temp, FRF_CZ_SRAM_PERR_INT_P_KER_EN, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) 	EFX_INVERT_OWORD(temp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) 	efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) 	/* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) 	 * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) 	efx_reado(efx, &temp, FR_AZ_TX_RESERVED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) 	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) 	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) 	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) 	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) 	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) 	/* Enable SW_EV to inherit in char driver - assume harmless here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) 	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) 	/* Prefetch threshold 2 => fetch when descriptor cache half empty */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) 	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) 	/* Disable hardware watchdog which can misfire */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) 	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) 	/* Squash TX of packets of 16 bytes or less */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) 	EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) 	efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) 	EFX_POPULATE_OWORD_4(temp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) 			     /* Default values */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) 			     FRF_BZ_TX_PACE_SB_NOT_AF, 0x15,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) 			     FRF_BZ_TX_PACE_SB_AF, 0xb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) 			     FRF_BZ_TX_PACE_FB_BASE, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) 			     /* Allow large pace values in the fast bin. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) 			     FRF_BZ_TX_PACE_BIN_TH,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) 			     FFE_BZ_TX_PACE_RESERVED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) 	efx_writeo(efx, &temp, FR_BZ_TX_PACE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) }
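
/* Note on the BUILD_BUG_ON()s above: the *_DC_SIZE fields are orders,
 * with entries = 8 << order.  TX_DC_ENTRIES = 16 gives order 1, and
 * RX_DC_ENTRIES = 64 forces RX_DC_ENTRIES_ORDER = 3; the RX prefetch
 * low watermark is then programmed as 64 - 8 = 56 entries.
 */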
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) /**************************************************************************
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812)  * Filter tables
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814)  **************************************************************************
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) /* "Fudge factors" - difference between programmed value and actual depth.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818)  * Due to the pipelined implementation, we need to program the H/W with a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819)  * value larger than the hop limit we want.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) #define EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD 3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) #define EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) /* Hard maximum search limit.  Hardware will time-out beyond 200-something.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825)  * We also need to avoid infinite loops in efx_farch_filter_search() when the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826)  * table is full.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) #define EFX_FARCH_FILTER_CTL_SRCH_MAX 200
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) /* Don't try very hard to find space for performance hints, as this is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831)  * counter-productive. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) #define EFX_FARCH_FILTER_CTL_SRCH_HINT_MAX 5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) enum efx_farch_filter_type {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) 	EFX_FARCH_FILTER_TCP_FULL = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) 	EFX_FARCH_FILTER_TCP_WILD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) 	EFX_FARCH_FILTER_UDP_FULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) 	EFX_FARCH_FILTER_UDP_WILD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) 	EFX_FARCH_FILTER_MAC_FULL = 4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) 	EFX_FARCH_FILTER_MAC_WILD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) 	EFX_FARCH_FILTER_UC_DEF = 8,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) 	EFX_FARCH_FILTER_MC_DEF,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) 	EFX_FARCH_FILTER_TYPE_COUNT,		/* number of specific types */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) enum efx_farch_filter_table_id {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) 	EFX_FARCH_FILTER_TABLE_RX_IP = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) 	EFX_FARCH_FILTER_TABLE_RX_MAC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) 	EFX_FARCH_FILTER_TABLE_RX_DEF,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) 	EFX_FARCH_FILTER_TABLE_TX_MAC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) 	EFX_FARCH_FILTER_TABLE_COUNT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) enum efx_farch_filter_index {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) 	EFX_FARCH_FILTER_INDEX_UC_DEF,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) 	EFX_FARCH_FILTER_INDEX_MC_DEF,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) 	EFX_FARCH_FILTER_SIZE_RX_DEF,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) struct efx_farch_filter_spec {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) 	u8	type:4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) 	u8	priority:4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) 	u8	flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) 	u16	dmaq_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) 	u32	data[3];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) struct efx_farch_filter_table {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) 	enum efx_farch_filter_table_id id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) 	u32		offset;		/* address of table relative to BAR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) 	unsigned	size;		/* number of entries */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) 	unsigned	step;		/* step between entries */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) 	unsigned	used;		/* number currently used */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) 	unsigned long	*used_bitmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) 	struct efx_farch_filter_spec *spec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) 	unsigned	search_limit[EFX_FARCH_FILTER_TYPE_COUNT];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) struct efx_farch_filter_state {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) 	struct rw_semaphore lock; /* Protects table contents */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) 	struct efx_farch_filter_table table[EFX_FARCH_FILTER_TABLE_COUNT];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) efx_farch_filter_table_clear_entry(struct efx_nic *efx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) 				   struct efx_farch_filter_table *table,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) 				   unsigned int filter_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) /* The filter hash function is an LFSR with polynomial x^16 + x^3 + 1, run
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890)  * over a 32-bit key derived from the n-tuple.  The initial state is 0xffff. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) static u16 efx_farch_filter_hash(u32 key)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) 	u16 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) 	/* First 16 rounds */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) 	tmp = 0x1fff ^ key >> 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) 	tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) 	tmp = tmp ^ tmp >> 9;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) 	/* Last 16 rounds */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) 	tmp = tmp ^ tmp << 13 ^ key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) 	tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) 	return tmp ^ tmp >> 9;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) /* To allow for hash collisions, filter search continues at these
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906)  * increments from the first possible entry selected by the hash. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) static u16 efx_farch_filter_increment(u32 key)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) 	return key * 2 - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) }
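
/* Illustrative sketch (not part of this driver): how the hash and
 * increment above are combined by the search code to probe a table
 * whose size is a power of two.  Because the increment is always odd
 * (key * 2 - 1), it is coprime with the table size, so the probe
 * sequence eventually visits every slot.  Names are hypothetical.
 */
static inline unsigned int
example_filter_probe_slot(u32 key, unsigned int try, unsigned int table_size)
{
	return (efx_farch_filter_hash(key) +
		try * efx_farch_filter_increment(key)) & (table_size - 1);
}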
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) static enum efx_farch_filter_table_id
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) efx_farch_filter_spec_table_id(const struct efx_farch_filter_spec *spec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) 	BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) 		     (EFX_FARCH_FILTER_TCP_FULL >> 2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) 	BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) 		     (EFX_FARCH_FILTER_TCP_WILD >> 2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) 	BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) 		     (EFX_FARCH_FILTER_UDP_FULL >> 2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) 	BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) 		     (EFX_FARCH_FILTER_UDP_WILD >> 2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) 	BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_MAC !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) 		     (EFX_FARCH_FILTER_MAC_FULL >> 2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) 	BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_MAC !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) 		     (EFX_FARCH_FILTER_MAC_WILD >> 2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) 	BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_TX_MAC !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) 		     EFX_FARCH_FILTER_TABLE_RX_MAC + 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) 	return (spec->type >> 2) + ((spec->flags & EFX_FILTER_FLAG_TX) ? 2 : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) }
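
/* The BUILD_BUG_ON()s above pin down the encoding: each RX table holds
 * up to four filter types, selected by type >> 2, and the TX flag adds 2:
 *   types 0-3 (TCP/UDP full/wild) -> EFX_FARCH_FILTER_TABLE_RX_IP  (0)
 *   types 4-5 (MAC full/wild)     -> EFX_FARCH_FILTER_TABLE_RX_MAC (1)
 *   types 8-9 (UC/MC default)     -> EFX_FARCH_FILTER_TABLE_RX_DEF (2)
 *   TX MAC filters                -> 1 + 2 = EFX_FARCH_FILTER_TABLE_TX_MAC (3)
 */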
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) static void efx_farch_filter_push_rx_config(struct efx_nic *efx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) 	struct efx_farch_filter_state *state = efx->filter_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) 	struct efx_farch_filter_table *table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) 	efx_oword_t filter_ctl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) 	efx_reado(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) 	table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) 	EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_FULL_SRCH_LIMIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) 			    table->search_limit[EFX_FARCH_FILTER_TCP_FULL] +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) 			    EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) 	EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_WILD_SRCH_LIMIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) 			    table->search_limit[EFX_FARCH_FILTER_TCP_WILD] +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) 			    EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) 	EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_FULL_SRCH_LIMIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) 			    table->search_limit[EFX_FARCH_FILTER_UDP_FULL] +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) 			    EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) 	EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_WILD_SRCH_LIMIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) 			    table->search_limit[EFX_FARCH_FILTER_UDP_WILD] +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) 			    EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) 	table = &state->table[EFX_FARCH_FILTER_TABLE_RX_MAC];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) 	if (table->size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) 		EFX_SET_OWORD_FIELD(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) 			filter_ctl, FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) 			table->search_limit[EFX_FARCH_FILTER_MAC_FULL] +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) 			EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) 		EFX_SET_OWORD_FIELD(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) 			filter_ctl, FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) 			table->search_limit[EFX_FARCH_FILTER_MAC_WILD] +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) 			EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) 	table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) 	if (table->size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) 		EFX_SET_OWORD_FIELD(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) 			filter_ctl, FRF_CZ_UNICAST_NOMATCH_Q_ID,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) 			table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].dmaq_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) 		EFX_SET_OWORD_FIELD(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) 			filter_ctl, FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) 			!!(table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].flags &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) 			   EFX_FILTER_FLAG_RX_RSS));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) 		EFX_SET_OWORD_FIELD(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) 			filter_ctl, FRF_CZ_MULTICAST_NOMATCH_Q_ID,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) 			table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].dmaq_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) 		EFX_SET_OWORD_FIELD(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) 			filter_ctl, FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) 			!!(table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].flags &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) 			   EFX_FILTER_FLAG_RX_RSS));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) 		/* There is a single bit to enable RX scatter for all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) 		 * unmatched packets.  Only set it if scatter is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) 		 * enabled in both filter specs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) 		EFX_SET_OWORD_FIELD(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) 			filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) 			!!(table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].flags &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) 			   table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].flags &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) 			   EFX_FILTER_FLAG_RX_SCATTER));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) 		/* We don't expose 'default' filters because unmatched
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) 		 * packets always go to the queue number found in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) 		 * RSS table.  But we still need to set the RX scatter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) 		 * bit here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) 		EFX_SET_OWORD_FIELD(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) 			filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) 			efx->rx_scatter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) 	efx_writeo(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) static void efx_farch_filter_push_tx_limits(struct efx_nic *efx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) 	struct efx_farch_filter_state *state = efx->filter_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) 	struct efx_farch_filter_table *table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) 	efx_oword_t tx_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) 	efx_reado(efx, &tx_cfg, FR_AZ_TX_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) 	table = &state->table[EFX_FARCH_FILTER_TABLE_TX_MAC];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) 	if (table->size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) 		EFX_SET_OWORD_FIELD(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) 			tx_cfg, FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) 			table->search_limit[EFX_FARCH_FILTER_MAC_FULL] +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) 			EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) 		EFX_SET_OWORD_FIELD(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) 			tx_cfg, FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) 			table->search_limit[EFX_FARCH_FILTER_MAC_WILD] +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) 			EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) 	efx_writeo(efx, &tx_cfg, FR_AZ_TX_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) efx_farch_filter_from_gen_spec(struct efx_farch_filter_spec *spec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) 			       const struct efx_filter_spec *gen_spec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) 	bool is_full = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) 	if ((gen_spec->flags & EFX_FILTER_FLAG_RX_RSS) && gen_spec->rss_context)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) 	spec->priority = gen_spec->priority;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) 	spec->flags = gen_spec->flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) 	spec->dmaq_id = gen_spec->dmaq_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) 	switch (gen_spec->match_flags) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) 	case (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) 	      EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) 	      EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) 		is_full = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) 		fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) 	case (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) 	      EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT): {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) 		__be32 rhost, host1, host2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) 		__be16 rport, port1, port2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) 		EFX_WARN_ON_PARANOID(!(gen_spec->flags & EFX_FILTER_FLAG_RX));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) 		if (gen_spec->ether_type != htons(ETH_P_IP))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) 			return -EPROTONOSUPPORT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) 		if (gen_spec->loc_port == 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) 		    (is_full && gen_spec->rem_port == 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) 			return -EADDRNOTAVAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) 		switch (gen_spec->ip_proto) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) 		case IPPROTO_TCP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) 			spec->type = (is_full ? EFX_FARCH_FILTER_TCP_FULL :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) 				      EFX_FARCH_FILTER_TCP_WILD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) 		case IPPROTO_UDP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) 			spec->type = (is_full ? EFX_FARCH_FILTER_UDP_FULL :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) 				      EFX_FARCH_FILTER_UDP_WILD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) 			return -EPROTONOSUPPORT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) 		/* The filter is constructed in terms of source and destination,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) 		 * with the odd wrinkle that the ports are swapped in a UDP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) 		 * wildcard filter.  We need to convert from local and remote
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) 		 * (= zero for wildcard) addresses and ports.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) 		rhost = is_full ? gen_spec->rem_host[0] : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) 		rport = is_full ? gen_spec->rem_port : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) 		host1 = rhost;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) 		host2 = gen_spec->loc_host[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) 		if (!is_full && gen_spec->ip_proto == IPPROTO_UDP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) 			port1 = gen_spec->loc_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) 			port2 = rport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) 			port1 = rport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) 			port2 = gen_spec->loc_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) 		spec->data[0] = ntohl(host1) << 16 | ntohs(port1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) 		spec->data[1] = ntohs(port2) << 16 | ntohl(host1) >> 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) 		spec->data[2] = ntohl(host2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) 	case EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_OUTER_VID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) 		is_full = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) 		fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) 	case EFX_FILTER_MATCH_LOC_MAC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) 		spec->type = (is_full ? EFX_FARCH_FILTER_MAC_FULL :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) 			      EFX_FARCH_FILTER_MAC_WILD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) 		spec->data[0] = is_full ? ntohs(gen_spec->outer_vid) : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) 		spec->data[1] = (gen_spec->loc_mac[2] << 24 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) 				 gen_spec->loc_mac[3] << 16 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) 				 gen_spec->loc_mac[4] << 8 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) 				 gen_spec->loc_mac[5]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) 		spec->data[2] = (gen_spec->loc_mac[0] << 8 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) 				 gen_spec->loc_mac[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) 	case EFX_FILTER_MATCH_LOC_MAC_IG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) 		spec->type = (is_multicast_ether_addr(gen_spec->loc_mac) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) 			      EFX_FARCH_FILTER_MC_DEF :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) 			      EFX_FARCH_FILTER_UC_DEF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) 		memset(spec->data, 0, sizeof(spec->data)); /* ensure equality */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) 		return -EPROTONOSUPPORT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) }
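
/* Worked example of the data[] packing above (illustrative values):
 * a TCP full-match RX filter with local 192.168.0.1:80 and remote
 * 10.0.0.2:12345 has, in host byte order, host1 = rem = 0x0a000002,
 * port1 = 0x3039, host2 = loc = 0xc0a80001, port2 = 0x0050, giving
 *   data[0] = 0x0a000002 << 16 | 0x3039       = 0x00023039
 *   data[1] = 0x0050 << 16 | 0x0a000002 >> 16 = 0x00500a00
 *   data[2] = 0xc0a80001
 * A full-match filter on MAC 00:0f:53:01:02:03 with VID 100 packs as
 *   data[0] = 100, data[1] = 0x53010203, data[2] = 0x000f.
 */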
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) efx_farch_filter_to_gen_spec(struct efx_filter_spec *gen_spec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) 			     const struct efx_farch_filter_spec *spec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) 	bool is_full = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) 	/* *gen_spec should be completely initialised, to be consistent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) 	 * with efx_filter_init_{rx,tx}() and in case we want to copy
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) 	 * it back to userland.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) 	memset(gen_spec, 0, sizeof(*gen_spec));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) 	gen_spec->priority = spec->priority;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) 	gen_spec->flags = spec->flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) 	gen_spec->dmaq_id = spec->dmaq_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) 	switch (spec->type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) 	case EFX_FARCH_FILTER_TCP_FULL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) 	case EFX_FARCH_FILTER_UDP_FULL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) 		is_full = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) 		fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) 	case EFX_FARCH_FILTER_TCP_WILD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) 	case EFX_FARCH_FILTER_UDP_WILD: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) 		__be32 host1, host2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) 		__be16 port1, port2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) 		gen_spec->match_flags =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) 			EFX_FILTER_MATCH_ETHER_TYPE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) 			EFX_FILTER_MATCH_IP_PROTO |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) 			EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) 		if (is_full)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) 			gen_spec->match_flags |= (EFX_FILTER_MATCH_REM_HOST |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) 						  EFX_FILTER_MATCH_REM_PORT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) 		gen_spec->ether_type = htons(ETH_P_IP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) 		gen_spec->ip_proto =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) 			(spec->type == EFX_FARCH_FILTER_TCP_FULL ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) 			 spec->type == EFX_FARCH_FILTER_TCP_WILD) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) 			IPPROTO_TCP : IPPROTO_UDP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) 		host1 = htonl(spec->data[0] >> 16 | spec->data[1] << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) 		port1 = htons(spec->data[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) 		host2 = htonl(spec->data[2]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) 		port2 = htons(spec->data[1] >> 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) 		if (spec->flags & EFX_FILTER_FLAG_TX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) 			gen_spec->loc_host[0] = host1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) 			gen_spec->rem_host[0] = host2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) 			gen_spec->loc_host[0] = host2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) 			gen_spec->rem_host[0] = host1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) 		if (!!(gen_spec->flags & EFX_FILTER_FLAG_TX) ^
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) 		    (!is_full && gen_spec->ip_proto == IPPROTO_UDP)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) 			gen_spec->loc_port = port1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) 			gen_spec->rem_port = port2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) 			gen_spec->loc_port = port2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) 			gen_spec->rem_port = port1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) 	case EFX_FARCH_FILTER_MAC_FULL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) 		is_full = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) 		fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) 	case EFX_FARCH_FILTER_MAC_WILD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) 		gen_spec->match_flags = EFX_FILTER_MATCH_LOC_MAC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) 		if (is_full)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) 			gen_spec->match_flags |= EFX_FILTER_MATCH_OUTER_VID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) 		gen_spec->loc_mac[0] = spec->data[2] >> 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) 		gen_spec->loc_mac[1] = spec->data[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) 		gen_spec->loc_mac[2] = spec->data[1] >> 24;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) 		gen_spec->loc_mac[3] = spec->data[1] >> 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) 		gen_spec->loc_mac[4] = spec->data[1] >> 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) 		gen_spec->loc_mac[5] = spec->data[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) 		gen_spec->outer_vid = htons(spec->data[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) 	case EFX_FARCH_FILTER_UC_DEF:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) 	case EFX_FARCH_FILTER_MC_DEF:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) 		gen_spec->match_flags = EFX_FILTER_MATCH_LOC_MAC_IG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) 		gen_spec->loc_mac[0] = spec->type == EFX_FARCH_FILTER_MC_DEF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) 		WARN_ON(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) }
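/* A sketch of the 96-bit spec->data[] layout for IP filters, as implied
 * by the unpacking above (field names here are descriptive only):
 *
 *	data[0] = host1[15:0] << 16 | port1
 *	data[1] = port2      << 16 | host1[31:16]
 *	data[2] = host2
 *
 * For RX filters {host2, port2} is normally the local (destination)
 * pair and {host1, port1} the remote one; for TX the roles are
 * reversed, and for wildcard UDP RX filters only the two ports swap.
 * The XOR test above handles both special cases in one expression.
 */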
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) efx_farch_filter_init_rx_auto(struct efx_nic *efx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) 			      struct efx_farch_filter_spec *spec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) 	/* If there's only one channel then disable RSS for non-VF
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) 	 * traffic, thereby allowing VFs to use RSS when the PF can't.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) 	spec->priority = EFX_FILTER_PRI_AUTO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) 	spec->flags = (EFX_FILTER_FLAG_RX |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) 		       (efx_rss_enabled(efx) ? EFX_FILTER_FLAG_RX_RSS : 0) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) 		       (efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) 	spec->dmaq_id = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) }
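/* Illustrative example: on a PF with RSS enabled and RX scatter active,
 * an auto filter comes out as
 *	flags = EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_RX_RSS |
 *		EFX_FILTER_FLAG_RX_SCATTER
 * with dmaq_id = 0, i.e. queue 0 acting as the RSS base queue.
 */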
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) /* Build a filter entry and return its n-tuple key. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) static u32 efx_farch_filter_build(efx_oword_t *filter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) 				  struct efx_farch_filter_spec *spec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) 	u32 data3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) 	switch (efx_farch_filter_spec_table_id(spec)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) 	case EFX_FARCH_FILTER_TABLE_RX_IP: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) 		bool is_udp = (spec->type == EFX_FARCH_FILTER_UDP_FULL ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) 			       spec->type == EFX_FARCH_FILTER_UDP_WILD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) 		EFX_POPULATE_OWORD_7(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) 			*filter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) 			FRF_BZ_RSS_EN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) 			!!(spec->flags & EFX_FILTER_FLAG_RX_RSS),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) 			FRF_BZ_SCATTER_EN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) 			!!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) 			FRF_BZ_TCP_UDP, is_udp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) 			FRF_BZ_RXQ_ID, spec->dmaq_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) 			EFX_DWORD_2, spec->data[2],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) 			EFX_DWORD_1, spec->data[1],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) 			EFX_DWORD_0, spec->data[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) 		data3 = is_udp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) 	case EFX_FARCH_FILTER_TABLE_RX_MAC: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) 		bool is_wild = spec->type == EFX_FARCH_FILTER_MAC_WILD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) 		EFX_POPULATE_OWORD_7(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) 			*filter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) 			FRF_CZ_RMFT_RSS_EN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) 			!!(spec->flags & EFX_FILTER_FLAG_RX_RSS),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) 			FRF_CZ_RMFT_SCATTER_EN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) 			!!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) 			FRF_CZ_RMFT_RXQ_ID, spec->dmaq_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) 			FRF_CZ_RMFT_WILDCARD_MATCH, is_wild,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) 			FRF_CZ_RMFT_DEST_MAC_HI, spec->data[2],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) 			FRF_CZ_RMFT_DEST_MAC_LO, spec->data[1],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) 			FRF_CZ_RMFT_VLAN_ID, spec->data[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) 		data3 = is_wild;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) 	case EFX_FARCH_FILTER_TABLE_TX_MAC: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) 		bool is_wild = spec->type == EFX_FARCH_FILTER_MAC_WILD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) 		EFX_POPULATE_OWORD_5(*filter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) 				     FRF_CZ_TMFT_TXQ_ID, spec->dmaq_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) 				     FRF_CZ_TMFT_WILDCARD_MATCH, is_wild,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) 				     FRF_CZ_TMFT_SRC_MAC_HI, spec->data[2],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) 				     FRF_CZ_TMFT_SRC_MAC_LO, spec->data[1],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) 				     FRF_CZ_TMFT_VLAN_ID, spec->data[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) 		data3 = is_wild | spec->dmaq_id << 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) 		BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) 	return spec->data[0] ^ spec->data[1] ^ spec->data[2] ^ data3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) }
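/* The returned key is simply data[0] ^ data[1] ^ data[2] ^ data3, where
 * data3 folds in whatever the match data alone cannot distinguish
 * (UDP vs TCP, wildcard vs full, and for TX the queue).  The key seeds
 * efx_farch_filter_hash() and efx_farch_filter_increment() when
 * efx_farch_filter_insert() below probes the hash table.
 */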
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) static bool efx_farch_filter_equal(const struct efx_farch_filter_spec *left,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) 				   const struct efx_farch_filter_spec *right)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) 	if (left->type != right->type ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) 	    memcmp(left->data, right->data, sizeof(left->data)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) 	if (left->flags & EFX_FILTER_FLAG_TX &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) 	    left->dmaq_id != right->dmaq_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) }
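/* Note that dmaq_id is not compared for RX filters: two RX specs that
 * match the same traffic are "equal" even if they steer to different
 * queues, so a replacing insert can re-route an existing match.  TX
 * filters must also agree on the queue, since the queue is folded into
 * the TX filter key (see data3 above).
 */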
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305)  * Construct/deconstruct external filter IDs.  At least the RX filter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306)  * IDs must be ordered by matching priority, for RX NFC semantics.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308)  * Deconstruction needs to be robust against invalid IDs so that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309)  * efx_filter_remove_id_safe() and efx_filter_get_filter_safe() can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310)  * accept user-provided IDs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) #define EFX_FARCH_FILTER_MATCH_PRI_COUNT	5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) static const u8 efx_farch_filter_type_match_pri[EFX_FARCH_FILTER_TYPE_COUNT] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) 	[EFX_FARCH_FILTER_TCP_FULL]	= 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) 	[EFX_FARCH_FILTER_UDP_FULL]	= 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) 	[EFX_FARCH_FILTER_TCP_WILD]	= 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) 	[EFX_FARCH_FILTER_UDP_WILD]	= 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) 	[EFX_FARCH_FILTER_MAC_FULL]	= 2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) 	[EFX_FARCH_FILTER_MAC_WILD]	= 3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) 	[EFX_FARCH_FILTER_UC_DEF]	= 4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) 	[EFX_FARCH_FILTER_MC_DEF]	= 4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) static const enum efx_farch_filter_table_id efx_farch_filter_range_table[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) 	EFX_FARCH_FILTER_TABLE_RX_IP,	/* RX match pri 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) 	EFX_FARCH_FILTER_TABLE_RX_IP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) 	EFX_FARCH_FILTER_TABLE_RX_MAC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) 	EFX_FARCH_FILTER_TABLE_RX_MAC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) 	EFX_FARCH_FILTER_TABLE_RX_DEF,	/* RX match pri 4 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) 	EFX_FARCH_FILTER_TABLE_TX_MAC,	/* TX match pri 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) 	EFX_FARCH_FILTER_TABLE_TX_MAC,	/* TX match pri 1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) #define EFX_FARCH_FILTER_INDEX_WIDTH 13
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) #define EFX_FARCH_FILTER_INDEX_MASK ((1 << EFX_FARCH_FILTER_INDEX_WIDTH) - 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) static inline u32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) efx_farch_filter_make_id(const struct efx_farch_filter_spec *spec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) 			 unsigned int index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) 	unsigned int range;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) 	range = efx_farch_filter_type_match_pri[spec->type];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) 	if (!(spec->flags & EFX_FILTER_FLAG_RX))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) 		range += EFX_FARCH_FILTER_MATCH_PRI_COUNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) 	return range << EFX_FARCH_FILTER_INDEX_WIDTH | index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) static inline enum efx_farch_filter_table_id
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) efx_farch_filter_id_table_id(u32 id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) 	unsigned int range = id >> EFX_FARCH_FILTER_INDEX_WIDTH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) 	if (range < ARRAY_SIZE(efx_farch_filter_range_table))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) 		return efx_farch_filter_range_table[range];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) 		return EFX_FARCH_FILTER_TABLE_COUNT; /* invalid */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) static inline unsigned int efx_farch_filter_id_index(u32 id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) 	return id & EFX_FARCH_FILTER_INDEX_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) }
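/* Worked example (illustrative): a TCP_WILD RX filter has match
 * priority 1, so the entry at table index 0x123 gets the external ID
 *	(1 << 13) | 0x123 = 0x2123
 * Deconstruction reverses this: 0x2123 >> 13 = 1 selects
 * EFX_FARCH_FILTER_TABLE_RX_IP from efx_farch_filter_range_table[],
 * and 0x2123 & 0x1fff recovers index 0x123.
 */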
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) u32 efx_farch_filter_get_rx_id_limit(struct efx_nic *efx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) 	struct efx_farch_filter_state *state = efx->filter_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) 	unsigned int range = EFX_FARCH_FILTER_MATCH_PRI_COUNT - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) 	enum efx_farch_filter_table_id table_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) 		table_id = efx_farch_filter_range_table[range];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) 		if (state->table[table_id].size != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) 			return range << EFX_FARCH_FILTER_INDEX_WIDTH |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) 				state->table[table_id].size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) 	} while (range--);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) }
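/* In other words, one more than the highest external RX filter ID that
 * can currently be in use: the RX ranges are scanned from the least
 * specific match priority downwards, so the first non-empty table sets
 * the limit.  This ordering is what gives RX NFC its ID semantics, per
 * the comment above.
 */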
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) s32 efx_farch_filter_insert(struct efx_nic *efx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) 			    struct efx_filter_spec *gen_spec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) 			    bool replace_equal)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) 	struct efx_farch_filter_state *state = efx->filter_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) 	struct efx_farch_filter_table *table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) 	struct efx_farch_filter_spec spec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) 	efx_oword_t filter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) 	int rep_index, ins_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) 	unsigned int depth = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) 	rc = efx_farch_filter_from_gen_spec(&spec, gen_spec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) 	if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) 		return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) 	down_write(&state->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) 	table = &state->table[efx_farch_filter_spec_table_id(&spec)];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) 	if (table->size == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) 		rc = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) 		goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) 	netif_vdbg(efx, hw, efx->net_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) 		   "%s: type %d search_limit=%d", __func__, spec.type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) 		   table->search_limit[spec.type]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) 	if (table->id == EFX_FARCH_FILTER_TABLE_RX_DEF) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) 		/* One filter spec per type */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) 		BUILD_BUG_ON(EFX_FARCH_FILTER_INDEX_UC_DEF != 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) 		BUILD_BUG_ON(EFX_FARCH_FILTER_INDEX_MC_DEF !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) 			     EFX_FARCH_FILTER_MC_DEF - EFX_FARCH_FILTER_UC_DEF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) 		rep_index = spec.type - EFX_FARCH_FILTER_UC_DEF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) 		ins_index = rep_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) 		/* Search concurrently for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) 		 * (1) a filter to be replaced (rep_index): any filter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) 		 *     with the same match values, up to the current
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) 		 *     search depth for this type, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) 		 * (2) the insertion point (ins_index): (1) or any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) 		 *     free slot before it or up to the maximum search
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) 		 *     depth for this priority
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) 		 * We fail if we cannot find (2).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) 		 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) 		 * We can stop once either
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) 		 * (a) we find (1), in which case we have definitely
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) 		 *     found (2) as well; or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) 		 * (b) we have searched exhaustively for (1), and have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) 		 *     either found (2) or searched exhaustively for it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) 		u32 key = efx_farch_filter_build(&filter, &spec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) 		unsigned int hash = efx_farch_filter_hash(key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) 		unsigned int incr = efx_farch_filter_increment(key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) 		unsigned int max_rep_depth = table->search_limit[spec.type];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) 		unsigned int max_ins_depth =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) 			spec.priority <= EFX_FILTER_PRI_HINT ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) 			EFX_FARCH_FILTER_CTL_SRCH_HINT_MAX :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) 			EFX_FARCH_FILTER_CTL_SRCH_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) 		unsigned int i = hash & (table->size - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) 		ins_index = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) 		depth = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) 		for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) 			if (!test_bit(i, table->used_bitmap)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) 				if (ins_index < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) 					ins_index = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) 			} else if (efx_farch_filter_equal(&spec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) 							  &table->spec[i])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) 				/* Case (a) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) 				if (ins_index < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) 					ins_index = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) 				rep_index = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) 			if (depth >= max_rep_depth &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) 			    (ins_index >= 0 || depth >= max_ins_depth)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) 				/* Case (b) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) 				if (ins_index < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) 					rc = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) 					goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) 				rep_index = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) 			i = (i + incr) & (table->size - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) 			++depth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) 	/* If we found a filter to be replaced, check whether we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) 	 * should do so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) 	if (rep_index >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) 		struct efx_farch_filter_spec *saved_spec =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) 			&table->spec[rep_index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) 		if (spec.priority == saved_spec->priority && !replace_equal) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) 			rc = -EEXIST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) 			goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) 		if (spec.priority < saved_spec->priority) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) 			rc = -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) 			goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) 		if (saved_spec->priority == EFX_FILTER_PRI_AUTO ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) 		    saved_spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) 			spec.flags |= EFX_FILTER_FLAG_RX_OVER_AUTO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) 	/* Insert the filter */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) 	if (ins_index != rep_index) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) 		__set_bit(ins_index, table->used_bitmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) 		++table->used;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) 	table->spec[ins_index] = spec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) 	if (table->id == EFX_FARCH_FILTER_TABLE_RX_DEF) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) 		efx_farch_filter_push_rx_config(efx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) 		if (table->search_limit[spec.type] < depth) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) 			table->search_limit[spec.type] = depth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) 			if (spec.flags & EFX_FILTER_FLAG_TX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) 				efx_farch_filter_push_tx_limits(efx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) 				efx_farch_filter_push_rx_config(efx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) 		efx_writeo(efx, &filter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) 			   table->offset + table->step * ins_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) 		/* If we were able to replace a filter by inserting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) 		 * at a lower depth, clear the replaced filter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) 		if (ins_index != rep_index && rep_index >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) 			efx_farch_filter_table_clear_entry(efx, table,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) 							   rep_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) 	netif_vdbg(efx, hw, efx->net_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) 		   "%s: filter type %d index %d rxq %u set",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) 		   __func__, spec.type, ins_index, spec.dmaq_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) 	rc = efx_farch_filter_make_id(&spec, ins_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) 	up_write(&state->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) }
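/* Probe-sequence sketch (illustrative): with hash h and increment d
 * derived from the build key, the slots tried are
 *	i(0) = h & (size - 1),  i(n+1) = (i(n) + d) & (size - 1)
 * i.e. classic double hashing over a power-of-two table.  depth starts
 * at 1 so that the search_limit recorded for the type counts probes,
 * and any increase is pushed to the NIC via
 * efx_farch_filter_push_rx_config()/efx_farch_filter_push_tx_limits()
 * so the hardware lookup searches at least as deep.
 */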
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) efx_farch_filter_table_clear_entry(struct efx_nic *efx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) 				   struct efx_farch_filter_table *table,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) 				   unsigned int filter_idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) {
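	/* Never written to: relies on static zero-initialisation, since
	 * writing an all-zeroes entry is what clears a hardware
	 * filter-table row.
	 */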
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) 	static efx_oword_t filter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) 	EFX_WARN_ON_PARANOID(!test_bit(filter_idx, table->used_bitmap));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) 	BUG_ON(table->offset == 0); /* can't clear MAC default filters */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) 	__clear_bit(filter_idx, table->used_bitmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) 	--table->used;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) 	memset(&table->spec[filter_idx], 0, sizeof(table->spec[0]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) 	efx_writeo(efx, &filter, table->offset + table->step * filter_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) 	/* If this filter required a greater search depth than
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) 	 * any other, the search limit for its type can now be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) 	 * decreased.  However, it is hard to determine that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) 	 * unless the table has become completely empty - in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) 	 * which case, all its search limits can be set to 0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) 	if (unlikely(table->used == 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) 		memset(table->search_limit, 0, sizeof(table->search_limit));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) 		if (table->id == EFX_FARCH_FILTER_TABLE_TX_MAC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) 			efx_farch_filter_push_tx_limits(efx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) 			efx_farch_filter_push_rx_config(efx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) static int efx_farch_filter_remove(struct efx_nic *efx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) 				   struct efx_farch_filter_table *table,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) 				   unsigned int filter_idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) 				   enum efx_filter_priority priority)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) 	struct efx_farch_filter_spec *spec = &table->spec[filter_idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) 	if (!test_bit(filter_idx, table->used_bitmap) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) 	    spec->priority != priority)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) 		return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) 	if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) 		efx_farch_filter_init_rx_auto(efx, spec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) 		efx_farch_filter_push_rx_config(efx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) 		efx_farch_filter_table_clear_entry(efx, table, filter_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) int efx_farch_filter_remove_safe(struct efx_nic *efx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) 				 enum efx_filter_priority priority,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) 				 u32 filter_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) 	struct efx_farch_filter_state *state = efx->filter_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) 	enum efx_farch_filter_table_id table_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) 	struct efx_farch_filter_table *table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) 	unsigned int filter_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) 	table_id = efx_farch_filter_id_table_id(filter_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) 	if ((unsigned int)table_id >= EFX_FARCH_FILTER_TABLE_COUNT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) 		return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) 	table = &state->table[table_id];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) 	filter_idx = efx_farch_filter_id_index(filter_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) 	if (filter_idx >= table->size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) 		return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) 	down_write(&state->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) 	rc = efx_farch_filter_remove(efx, table, filter_idx, priority);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) 	up_write(&state->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) int efx_farch_filter_get_safe(struct efx_nic *efx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) 			      enum efx_filter_priority priority,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) 			      u32 filter_id, struct efx_filter_spec *spec_buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) 	struct efx_farch_filter_state *state = efx->filter_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) 	enum efx_farch_filter_table_id table_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) 	struct efx_farch_filter_table *table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) 	struct efx_farch_filter_spec *spec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) 	unsigned int filter_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) 	int rc = -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) 	down_read(&state->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) 	table_id = efx_farch_filter_id_table_id(filter_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) 	if ((unsigned int)table_id >= EFX_FARCH_FILTER_TABLE_COUNT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) 		goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) 	table = &state->table[table_id];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) 	filter_idx = efx_farch_filter_id_index(filter_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) 	if (filter_idx >= table->size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) 		goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) 	spec = &table->spec[filter_idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) 	if (test_bit(filter_idx, table->used_bitmap) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) 	    spec->priority == priority) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) 		efx_farch_filter_to_gen_spec(spec_buf, spec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) 		rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) 	up_read(&state->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) efx_farch_filter_table_clear(struct efx_nic *efx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) 			     enum efx_farch_filter_table_id table_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) 			     enum efx_filter_priority priority)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) 	struct efx_farch_filter_state *state = efx->filter_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) 	struct efx_farch_filter_table *table = &state->table[table_id];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) 	unsigned int filter_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) 	down_write(&state->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) 	for (filter_idx = 0; filter_idx < table->size; ++filter_idx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) 		if (table->spec[filter_idx].priority != EFX_FILTER_PRI_AUTO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) 			efx_farch_filter_remove(efx, table,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) 						filter_idx, priority);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) 	up_write(&state->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) int efx_farch_filter_clear_rx(struct efx_nic *efx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) 			       enum efx_filter_priority priority)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) 	efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_IP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) 				     priority);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) 	efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_MAC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) 				     priority);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) 	efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_DEF,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) 				     priority);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) u32 efx_farch_filter_count_rx_used(struct efx_nic *efx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) 				   enum efx_filter_priority priority)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) 	struct efx_farch_filter_state *state = efx->filter_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) 	enum efx_farch_filter_table_id table_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) 	struct efx_farch_filter_table *table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) 	unsigned int filter_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) 	u32 count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) 	down_read(&state->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) 	for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) 	     table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) 	     table_id++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) 		table = &state->table[table_id];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) 		for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) 			if (test_bit(filter_idx, table->used_bitmap) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) 			    table->spec[filter_idx].priority == priority)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) 				++count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) 	up_read(&state->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) 	return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) s32 efx_farch_filter_get_rx_ids(struct efx_nic *efx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) 				enum efx_filter_priority priority,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) 				u32 *buf, u32 size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) 	struct efx_farch_filter_state *state = efx->filter_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) 	enum efx_farch_filter_table_id table_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) 	struct efx_farch_filter_table *table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) 	unsigned int filter_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) 	s32 count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) 	down_read(&state->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) 	for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) 	     table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) 	     table_id++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) 		table = &state->table[table_id];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) 		for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) 			if (test_bit(filter_idx, table->used_bitmap) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) 			    table->spec[filter_idx].priority == priority) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) 				if (count == size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) 					count = -EMSGSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) 					goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) 				buf[count++] = efx_farch_filter_make_id(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) 					&table->spec[filter_idx], filter_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) 	up_read(&state->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) 	return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) /* Restore filter state after reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) void efx_farch_filter_table_restore(struct efx_nic *efx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) 	struct efx_farch_filter_state *state = efx->filter_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) 	enum efx_farch_filter_table_id table_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) 	struct efx_farch_filter_table *table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) 	efx_oword_t filter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) 	unsigned int filter_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) 	down_write(&state->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) 	for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) 		table = &state->table[table_id];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) 		/* Check whether this is a regular register table */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) 		if (table->step == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) 		for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) 			if (!test_bit(filter_idx, table->used_bitmap))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) 			efx_farch_filter_build(&filter, &table->spec[filter_idx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) 			efx_writeo(efx, &filter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) 				   table->offset + table->step * filter_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) 	efx_farch_filter_push_rx_config(efx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) 	efx_farch_filter_push_tx_limits(efx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) 	up_write(&state->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) void efx_farch_filter_table_remove(struct efx_nic *efx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) 	struct efx_farch_filter_state *state = efx->filter_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) 	enum efx_farch_filter_table_id table_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) 	for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) 		kfree(state->table[table_id].used_bitmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) 		vfree(state->table[table_id].spec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) 	kfree(state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) int efx_farch_filter_table_probe(struct efx_nic *efx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) 	struct efx_farch_filter_state *state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) 	struct efx_farch_filter_table *table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) 	unsigned int table_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) 	state = kzalloc(sizeof(struct efx_farch_filter_state), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) 	if (!state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) 	efx->filter_state = state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) 	init_rwsem(&state->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) 	table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) 	table->id = EFX_FARCH_FILTER_TABLE_RX_IP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) 	table->offset = FR_BZ_RX_FILTER_TBL0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) 	table->size = FR_BZ_RX_FILTER_TBL0_ROWS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) 	table->step = FR_BZ_RX_FILTER_TBL0_STEP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) 	table = &state->table[EFX_FARCH_FILTER_TABLE_RX_MAC];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) 	table->id = EFX_FARCH_FILTER_TABLE_RX_MAC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) 	table->offset = FR_CZ_RX_MAC_FILTER_TBL0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) 	table->size = FR_CZ_RX_MAC_FILTER_TBL0_ROWS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) 	table->step = FR_CZ_RX_MAC_FILTER_TBL0_STEP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) 	table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) 	table->id = EFX_FARCH_FILTER_TABLE_RX_DEF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) 	table->size = EFX_FARCH_FILTER_SIZE_RX_DEF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) 	table = &state->table[EFX_FARCH_FILTER_TABLE_TX_MAC];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) 	table->id = EFX_FARCH_FILTER_TABLE_TX_MAC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) 	table->offset = FR_CZ_TX_MAC_FILTER_TBL0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) 	table->size = FR_CZ_TX_MAC_FILTER_TBL0_ROWS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) 	table->step = FR_CZ_TX_MAC_FILTER_TBL0_STEP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) 	for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) 		table = &state->table[table_id];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) 		if (table->size == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) 		table->used_bitmap = kcalloc(BITS_TO_LONGS(table->size),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) 					     sizeof(unsigned long),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) 					     GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) 		if (!table->used_bitmap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) 			goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) 		table->spec = vzalloc(array_size(sizeof(*table->spec),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) 						 table->size));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) 		if (!table->spec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) 			goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) 	table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) 	if (table->size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) 		/* RX default filters must always exist */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) 		struct efx_farch_filter_spec *spec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) 		unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) 		for (i = 0; i < EFX_FARCH_FILTER_SIZE_RX_DEF; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) 			spec = &table->spec[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) 			spec->type = EFX_FARCH_FILTER_UC_DEF + i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) 			efx_farch_filter_init_rx_auto(efx, spec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) 			__set_bit(i, table->used_bitmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) 	efx_farch_filter_push_rx_config(efx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) 	efx_farch_filter_table_remove(efx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) 	return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) }
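/* Sizing note (values from farch_regs.h for this hardware generation;
 * illustrative): the RX IP table is by far the largest at 8192 rows,
 * exactly 1 << EFX_FARCH_FILTER_INDEX_WIDTH, while the RX and TX MAC
 * tables hold 512 rows each and the RX default table holds just the
 * two UC_DEF/MC_DEF entries.
 */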
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) /* Update scatter enable flags for filters pointing to our own RX queues */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) void efx_farch_filter_update_rx_scatter(struct efx_nic *efx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) 	struct efx_farch_filter_state *state = efx->filter_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) 	enum efx_farch_filter_table_id table_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) 	struct efx_farch_filter_table *table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) 	efx_oword_t filter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) 	unsigned int filter_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) 	down_write(&state->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) 	for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) 	     table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) 	     table_id++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) 		table = &state->table[table_id];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) 		for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) 			if (!test_bit(filter_idx, table->used_bitmap) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) 			    table->spec[filter_idx].dmaq_id >=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) 			    efx->n_rx_channels)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) 			if (efx->rx_scatter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) 				table->spec[filter_idx].flags |=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) 					EFX_FILTER_FLAG_RX_SCATTER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) 				table->spec[filter_idx].flags &=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) 					~EFX_FILTER_FLAG_RX_SCATTER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) 			if (table_id == EFX_FARCH_FILTER_TABLE_RX_DEF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) 				/* Pushed by efx_farch_filter_push_rx_config() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) 			efx_farch_filter_build(&filter, &table->spec[filter_idx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) 			efx_writeo(efx, &filter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) 				   table->offset + table->step * filter_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) 	efx_farch_filter_push_rx_config(efx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) 	up_write(&state->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) #ifdef CONFIG_RFS_ACCEL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) 				     unsigned int index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) 	struct efx_farch_filter_state *state = efx->filter_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) 	struct efx_farch_filter_table *table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) 	bool ret = false, force = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) 	u16 arfs_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) 	down_write(&state->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) 	spin_lock_bh(&efx->rps_hash_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912) 	table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) 	if (test_bit(index, table->used_bitmap) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) 	    table->spec[index].priority == EFX_FILTER_PRI_HINT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) 		struct efx_arfs_rule *rule = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) 		struct efx_filter_spec spec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) 		efx_farch_filter_to_gen_spec(&spec, &table->spec[index]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) 		if (!efx->rps_hash_table) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) 			/* In the absence of the table, we always returned 0 to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) 			 * ARFS, so use the same to query it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) 			arfs_id = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) 			rule = efx_rps_hash_find(efx, &spec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) 			if (!rule) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927) 				/* ARFS table doesn't know of this filter, remove it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928) 				force = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) 				arfs_id = rule->arfs_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) 				if (!efx_rps_check_rule(rule, index, &force))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) 					goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) 		if (force || rps_may_expire_flow(efx->net_dev, spec.dmaq_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) 						 flow_id, arfs_id)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) 			if (rule)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) 				rule->filter_id = EFX_ARFS_FILTER_ID_REMOVING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) 			efx_rps_hash_del(efx, &spec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) 			efx_farch_filter_table_clear_entry(efx, table, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) 			ret = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) 	spin_unlock_bh(&efx->rps_hash_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946) 	up_write(&state->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950) #endif /* CONFIG_RFS_ACCEL */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) void efx_farch_filter_sync_rx_mode(struct efx_nic *efx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) 	struct net_device *net_dev = efx->net_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) 	struct netdev_hw_addr *ha;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) 	union efx_multicast_hash *mc_hash = &efx->multicast_hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) 	u32 crc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958) 	int bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) 	if (!efx_dev_registered(efx))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) 	netif_addr_lock_bh(net_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) 	efx->unicast_filter = !(net_dev->flags & IFF_PROMISC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967) 	/* Build multicast hash table */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) 	if (net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) 		memset(mc_hash, 0xff, sizeof(*mc_hash));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971) 		memset(mc_hash, 0x00, sizeof(*mc_hash));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) 		netdev_for_each_mc_addr(ha, net_dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) 			crc = ether_crc_le(ETH_ALEN, ha->addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) 			bit = crc & (EFX_MCAST_HASH_ENTRIES - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975) 			__set_bit_le(bit, mc_hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) 		/* Broadcast packets go through the multicast hash filter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979) 		 * ether_crc_le() of the broadcast address is 0xbe2612ff
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980) 		 * so we always add bit 0xff to the mask.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982) 		__set_bit_le(0xff, mc_hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) 	netif_addr_unlock_bh(net_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) }
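/* Worked example (illustrative): with EFX_MCAST_HASH_ENTRIES == 256 the
 * mask is 0xff, so for the broadcast address
 *	ether_crc_le(ETH_ALEN, broadcast) = 0xbe2612ff
 *	0xbe2612ff & 0xff = 0xff
 * which is exactly the bit set unconditionally above, ensuring that
 * broadcast frames always pass the multicast hash filter.
 */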