// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2013 Solarflare Communications Inc.
 */

#include <linux/socket.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/prefetch.h>
#include <linux/moduleparam.h>
#include <linux/iommu.h>
#include <net/ip.h>
#include <net/checksum.h>
#include <net/xdp.h>
#include <linux/bpf_trace.h>
#include "net_driver.h"
#include "efx.h"
#include "rx_common.h"
#include "filter.h"
#include "nic.h"
#include "selftest.h"
#include "workarounds.h"

/* Preferred number of descriptors to fill at once */
#define EFX_RX_PREFERRED_BATCH 8U

/* Maximum RX prefix used by any architecture. */
#define EFX_MAX_RX_PREFIX_SIZE 16

/* Size of buffer allocated for skb header area. */
#define EFX_SKB_HEADERS 128u

/* Each packet can consume up to ceil(max_frame_len / buffer_size) buffers */
#define EFX_RX_MAX_FRAGS DIV_ROUND_UP(EFX_MAX_FRAME_LEN(EFX_MAX_MTU), \
				      EFX_RX_USR_BUF_SIZE)
static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
				     struct efx_rx_buffer *rx_buf,
				     int len)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned int max_len = rx_buf->len - efx->type->rx_buffer_padding;

	if (likely(len <= max_len))
		return;

	/* The packet must be discarded, but whether this is treated as
	 * a fatal error is up to the caller.
	 */
	rx_buf->flags |= EFX_RX_PKT_DISCARD;

	if (net_ratelimit())
		netif_err(efx, rx_err, efx->net_dev,
			  "RX queue %d overlength RX event (%#x > %#x)\n",
			  efx_rx_queue_index(rx_queue), len, max_len);

	efx_rx_queue_channel(rx_queue)->n_rx_overlength++;
}

/* Allocate and construct an SKB around page fragments */
static struct sk_buff *efx_rx_mk_skb(struct efx_channel *channel,
				     struct efx_rx_buffer *rx_buf,
				     unsigned int n_frags,
				     u8 *eh, int hdr_len)
{
	struct efx_nic *efx = channel->efx;
	struct sk_buff *skb;

	/* Allocate an SKB to store the headers */
	skb = netdev_alloc_skb(efx->net_dev,
			       efx->rx_ip_align + efx->rx_prefix_size +
			       hdr_len);
	if (unlikely(!skb)) {
		atomic_inc(&efx->n_rx_noskb_drops);
		return NULL;
	}

	EFX_WARN_ON_ONCE_PARANOID(rx_buf->len < hdr_len);

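	/* Copy the RX prefix and packet headers into the skb, then reserve
	 * past the alignment padding and the prefix so that skb->data points
	 * at an IP-aligned Ethernet header.
	 */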
	memcpy(skb->data + efx->rx_ip_align, eh - efx->rx_prefix_size,
	       efx->rx_prefix_size + hdr_len);
	skb_reserve(skb, efx->rx_ip_align + efx->rx_prefix_size);
	__skb_put(skb, hdr_len);

	/* Append the remaining page(s) onto the frag list */
	if (rx_buf->len > hdr_len) {
		rx_buf->page_offset += hdr_len;
		rx_buf->len -= hdr_len;

		for (;;) {
			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
					   rx_buf->page, rx_buf->page_offset,
					   rx_buf->len);
			rx_buf->page = NULL;
			skb->len += rx_buf->len;
			skb->data_len += rx_buf->len;
			if (skb_shinfo(skb)->nr_frags == n_frags)
				break;

			rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
		}
	} else {
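		/* The whole packet fitted into the copied headers, so the
		 * page is no longer referenced and can be freed now.
		 */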
		__free_pages(rx_buf->page, efx->rx_buffer_order);
		rx_buf->page = NULL;
		n_frags = 0;
	}

	skb->truesize += n_frags * efx->rx_buffer_truesize;

	/* Move past the ethernet header */
	skb->protocol = eth_type_trans(skb, efx->net_dev);

	skb_mark_napi_id(skb, &channel->napi_str);

	return skb;
}

void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
		   unsigned int n_frags, unsigned int len, u16 flags)
{
	struct efx_nic *efx = rx_queue->efx;
	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
	struct efx_rx_buffer *rx_buf;

	rx_queue->rx_packets++;

	rx_buf = efx_rx_buffer(rx_queue, index);
	rx_buf->flags |= flags;

	/* Validate the number of fragments and completed length */
	if (n_frags == 1) {
		if (!(flags & EFX_RX_PKT_PREFIX_LEN))
			efx_rx_packet__check_len(rx_queue, rx_buf, len);
	} else if (unlikely(n_frags > EFX_RX_MAX_FRAGS) ||
		   unlikely(len <= (n_frags - 1) * efx->rx_dma_len) ||
		   unlikely(len > n_frags * efx->rx_dma_len) ||
		   unlikely(!efx->rx_scatter)) {
		/* If this isn't an explicit discard request, either
		 * the hardware or the driver is broken.
		 */
		WARN_ON(!(len == 0 && rx_buf->flags & EFX_RX_PKT_DISCARD));
		rx_buf->flags |= EFX_RX_PKT_DISCARD;
	}

	netif_vdbg(efx, rx_status, efx->net_dev,
		   "RX queue %d received ids %x-%x len %d %s%s\n",
		   efx_rx_queue_index(rx_queue), index,
		   (index + n_frags - 1) & rx_queue->ptr_mask, len,
		   (rx_buf->flags & EFX_RX_PKT_CSUMMED) ? " [SUMMED]" : "",
		   (rx_buf->flags & EFX_RX_PKT_DISCARD) ? " [DISCARD]" : "");

	/* Discard the packet if instructed to do so, but complete
	 * processing of the previous (pipelined) receive first.
	 */
	if (unlikely(rx_buf->flags & EFX_RX_PKT_DISCARD)) {
		efx_rx_flush_packet(channel);
		efx_discard_rx_packet(channel, rx_buf, n_frags);
		return;
	}

	if (n_frags == 1 && !(flags & EFX_RX_PKT_PREFIX_LEN))
		rx_buf->len = len;

	/* Release and/or sync the DMA mapping - assumes all RX buffers
	 * are consumed in-order per RX queue.
	 */
	efx_sync_rx_buffer(efx, rx_buf, rx_buf->len);

	/* Prefetch nice and early so data will (hopefully) be in cache by
	 * the time we look at it.
	 */
	prefetch(efx_rx_buf_va(rx_buf));

	rx_buf->page_offset += efx->rx_prefix_size;
	rx_buf->len -= efx->rx_prefix_size;

	if (n_frags > 1) {
		/* Release/sync DMA mapping for additional fragments.
		 * Fix length for last fragment.
		 */
		unsigned int tail_frags = n_frags - 1;

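		/* Sync each intermediate fragment at the full DMA length;
		 * only the final fragment carries a partial length, which is
		 * fixed up below.
		 */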
		for (;;) {
			rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
			if (--tail_frags == 0)
				break;
			efx_sync_rx_buffer(efx, rx_buf, efx->rx_dma_len);
		}
		rx_buf->len = len - (n_frags - 1) * efx->rx_dma_len;
		efx_sync_rx_buffer(efx, rx_buf, rx_buf->len);
	}

	/* All fragments have been DMA-synced, so recycle pages. */
	rx_buf = efx_rx_buffer(rx_queue, index);
	efx_recycle_rx_pages(channel, rx_buf, n_frags);

	/* Pipeline receives so that we give time for packet headers to be
	 * prefetched into cache.
	 */
	efx_rx_flush_packet(channel);
	channel->rx_pkt_n_frags = n_frags;
	channel->rx_pkt_index = index;
}

static void efx_rx_deliver(struct efx_channel *channel, u8 *eh,
			   struct efx_rx_buffer *rx_buf,
			   unsigned int n_frags)
{
	struct sk_buff *skb;
	u16 hdr_len = min_t(u16, rx_buf->len, EFX_SKB_HEADERS);

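	/* Copy at most EFX_SKB_HEADERS bytes into the skb linear area;
	 * any remaining payload stays in page fragments.
	 */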
	skb = efx_rx_mk_skb(channel, rx_buf, n_frags, eh, hdr_len);
	if (unlikely(!skb)) {
		struct efx_rx_queue *rx_queue;

		rx_queue = efx_channel_get_rx_queue(channel);
		efx_free_rx_buffers(rx_queue, rx_buf, n_frags);
		return;
	}
	skb_record_rx_queue(skb, channel->rx_queue.core_index);

	/* Set the SKB flags */
	skb_checksum_none_assert(skb);
	if (likely(rx_buf->flags & EFX_RX_PKT_CSUMMED)) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
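		/* CSUM_LEVEL indicates that an inner (encapsulated)
		 * checksum was validated as well as the outer one.
		 */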
		skb->csum_level = !!(rx_buf->flags & EFX_RX_PKT_CSUM_LEVEL);
	}

	efx_rx_skb_attach_timestamp(channel, skb);

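	/* Channel types with a dedicated handler (e.g. the PTP channel)
	 * may consume the skb themselves; if so, delivery is complete.
	 */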
	if (channel->type->receive_skb)
		if (channel->type->receive_skb(channel, skb))
			return;

	/* Pass the packet up */
	if (channel->rx_list)
		/* Add to list, will pass up later */
		list_add_tail(&skb->list, channel->rx_list);
	else
		/* No list, so pass it up now */
		netif_receive_skb(skb);
}

/**
 * efx_do_xdp - perform XDP processing on a received packet
 * @efx: the NIC
 * @channel: the channel on which the packet was received
 * @rx_buf: the RX buffer containing the packet
 * @ehp: in/out pointer to the start of frame
 *
 * Return: true if the packet should still be delivered.
 */
static bool efx_do_xdp(struct efx_nic *efx, struct efx_channel *channel,
		       struct efx_rx_buffer *rx_buf, u8 **ehp)
{
	u8 rx_prefix[EFX_MAX_RX_PREFIX_SIZE];
	struct efx_rx_queue *rx_queue;
	struct bpf_prog *xdp_prog;
	struct xdp_frame *xdpf;
	struct xdp_buff xdp;
	u32 xdp_act;
	s16 offset;
	int err;

	rcu_read_lock();
	xdp_prog = rcu_dereference(efx->xdp_prog);
	if (!xdp_prog) {
		rcu_read_unlock();
		return true;
	}

	rx_queue = efx_channel_get_rx_queue(channel);

	if (unlikely(channel->rx_pkt_n_frags > 1)) {
		/* We can't do XDP on fragmented packets - drop. */
		rcu_read_unlock();
		efx_free_rx_buffers(rx_queue, rx_buf,
				    channel->rx_pkt_n_frags);
		if (net_ratelimit())
			netif_err(efx, rx_err, efx->net_dev,
				  "XDP is not possible with multiple receive fragments (%d)\n",
				  channel->rx_pkt_n_frags);
		channel->n_rx_xdp_bad_drops++;
		return false;
	}

	dma_sync_single_for_cpu(&efx->pci_dev->dev, rx_buf->dma_addr,
				rx_buf->len, DMA_FROM_DEVICE);

	/* Save the RX prefix so it can be restored if the program moves
	 * the start of frame.
	 */
	EFX_WARN_ON_PARANOID(efx->rx_prefix_size > EFX_MAX_RX_PREFIX_SIZE);
	memcpy(rx_prefix, *ehp - efx->rx_prefix_size,
	       efx->rx_prefix_size);

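	/* The driver reserves EFX_XDP_HEADROOM bytes before the frame, so
	 * the program has room to grow headers with bpf_xdp_adjust_head().
	 */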
	xdp.data = *ehp;
	xdp.data_hard_start = xdp.data - EFX_XDP_HEADROOM;

	/* No support yet for XDP metadata */
	xdp_set_data_meta_invalid(&xdp);
	xdp.data_end = xdp.data + rx_buf->len;
	xdp.rxq = &rx_queue->xdp_rxq_info;
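	/* frame_sz bounds tail adjustments to the underlying buffer size */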
	xdp.frame_sz = efx->rx_page_buf_step;

	xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp);
	rcu_read_unlock();

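	/* How far, if at all, the program moved the start of frame */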
	offset = (u8 *)xdp.data - *ehp;

	switch (xdp_act) {
	case XDP_PASS:
		/* Fix up rx prefix. */
		if (offset) {
			*ehp += offset;
			rx_buf->page_offset += offset;
			rx_buf->len -= offset;
			memcpy(*ehp - efx->rx_prefix_size, rx_prefix,
			       efx->rx_prefix_size);
		}
		break;

	case XDP_TX:
		/* Buffer ownership passes to tx on success. */
		xdpf = xdp_convert_buff_to_frame(&xdp);
		err = efx_xdp_tx_buffers(efx, 1, &xdpf, true);
		if (unlikely(err != 1)) {
			efx_free_rx_buffers(rx_queue, rx_buf, 1);
			if (net_ratelimit())
				netif_err(efx, rx_err, efx->net_dev,
					  "XDP TX failed (%d)\n", err);
			channel->n_rx_xdp_bad_drops++;
			trace_xdp_exception(efx->net_dev, xdp_prog, xdp_act);
		} else {
			channel->n_rx_xdp_tx++;
		}
		break;

	case XDP_REDIRECT:
		err = xdp_do_redirect(efx->net_dev, &xdp, xdp_prog);
		if (unlikely(err)) {
			efx_free_rx_buffers(rx_queue, rx_buf, 1);
			if (net_ratelimit())
				netif_err(efx, rx_err, efx->net_dev,
					  "XDP redirect failed (%d)\n", err);
			channel->n_rx_xdp_bad_drops++;
			trace_xdp_exception(efx->net_dev, xdp_prog, xdp_act);
		} else {
			channel->n_rx_xdp_redirect++;
		}
		break;

	case XDP_ABORTED:
		trace_xdp_exception(efx->net_dev, xdp_prog, xdp_act);
		fallthrough;
	case XDP_DROP:
		efx_free_rx_buffers(rx_queue, rx_buf, 1);
		channel->n_rx_xdp_drops++;
		break;

	default:
		bpf_warn_invalid_xdp_action(xdp_act);
		efx_free_rx_buffers(rx_queue, rx_buf, 1);
		channel->n_rx_xdp_bad_drops++;
		trace_xdp_exception(efx->net_dev, xdp_prog, xdp_act);
		break;
	}

	return xdp_act == XDP_PASS;
}

/* Handle a received packet. Second half: touches packet payload. */
void __efx_rx_packet(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	struct efx_rx_buffer *rx_buf =
		efx_rx_buffer(&channel->rx_queue, channel->rx_pkt_index);
	u8 *eh = efx_rx_buf_va(rx_buf);

	/* Read length from the prefix if necessary. This already
	 * excludes the length of the prefix itself.
	 */
	if (rx_buf->flags & EFX_RX_PKT_PREFIX_LEN)
		rx_buf->len = le16_to_cpup((__le16 *)
					   (eh + efx->rx_packet_len_offset));

	/* If we're in the loopback self-test, pass the packet directly to
	 * the loopback layer and free the rx_buf here.
	 */
	if (unlikely(efx->loopback_selftest)) {
		struct efx_rx_queue *rx_queue;

		efx_loopback_rx_packet(efx, eh, rx_buf->len);
		rx_queue = efx_channel_get_rx_queue(channel);
		efx_free_rx_buffers(rx_queue, rx_buf,
				    channel->rx_pkt_n_frags);
		goto out;
	}

	if (!efx_do_xdp(efx, channel, rx_buf, &eh))
		goto out;

	if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM)))
		rx_buf->flags &= ~EFX_RX_PKT_CSUMMED;

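	/* TCP traffic is coalesced via GRO where the channel has no
	 * dedicated skb handler; everything else is delivered as a full skb.
	 */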
	if ((rx_buf->flags & EFX_RX_PKT_TCP) && !channel->type->receive_skb)
		efx_rx_packet_gro(channel, rx_buf, channel->rx_pkt_n_frags, eh, 0);
	else
		efx_rx_deliver(channel, eh, rx_buf, channel->rx_pkt_n_frags);
out:
	channel->rx_pkt_n_frags = 0;
}