// SPDX-License-Identifier: GPL-2.0-or-later
/* A network driver using virtio.
 *
 * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
 */
//#define DEBUG
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/virtio.h>
#include <linux/virtio_net.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/scatterlist.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/average.h>
#include <linux/filter.h>
#include <linux/kernel.h>
#include <net/route.h>
#include <net/xdp.h>
#include <net/net_failover.h>

static int napi_weight = NAPI_POLL_WEIGHT;
module_param(napi_weight, int, 0444);

static bool csum = true, gso = true, napi_tx = true;
module_param(csum, bool, 0444);
module_param(gso, bool, 0444);
module_param(napi_tx, bool, 0644);

/* FIXME: MTU in config. */
#define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
#define GOOD_COPY_LEN	128
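/* Copy-break size: page_to_skb() below allocates skbs with GOOD_COPY_LEN
 * bytes of linear space, so small frames that fit are copied out and the
 * backing page can be reused or released.
 */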

#define VIRTNET_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)

/* Amount of XDP headroom to prepend to packets for use by xdp_adjust_head */
#define VIRTIO_XDP_HEADROOM 256
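/* 256 bytes matches XDP_PACKET_HEADROOM, giving bpf_xdp_adjust_head() room
 * to prepend data (e.g. an encapsulation header) without reallocating.
 */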

/* Separating two types of XDP xmit */
#define VIRTIO_XDP_TX		BIT(0)
#define VIRTIO_XDP_REDIR	BIT(1)

#define VIRTIO_XDP_FLAG	BIT(0)
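/* Pointer tag for TX completions: struct xdp_frame pointers are at least
 * word-aligned, so bit 0 is free to distinguish XDP frames from sk_buffs
 * when buffers are reaped (see is_xdp_frame() and friends below).
 */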

/* RX packet size EWMA. The average packet size is used to determine the packet
 * buffer size when refilling RX rings. As the entire RX ring may be refilled
 * at once, the weight is chosen so that the EWMA will be insensitive to short-
 * term, transient changes in packet size.
 */
DECLARE_EWMA(pkt_len, 0, 64)
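/* DECLARE_EWMA(name, precision, weight_rcp): 0 extra fractional bits and a
 * reciprocal weight of 64, i.e. each new sample contributes roughly 1/64 of
 * its value to the average, so one burst of refills barely moves it.
 */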

#define VIRTNET_DRIVER_VERSION "1.0.0"

static const unsigned long guest_offloads[] = {
	VIRTIO_NET_F_GUEST_TSO4,
	VIRTIO_NET_F_GUEST_TSO6,
	VIRTIO_NET_F_GUEST_ECN,
	VIRTIO_NET_F_GUEST_UFO,
	VIRTIO_NET_F_GUEST_CSUM
};

#define GUEST_OFFLOAD_GRO_HW_MASK ((1ULL << VIRTIO_NET_F_GUEST_TSO4) | \
				   (1ULL << VIRTIO_NET_F_GUEST_TSO6) | \
				   (1ULL << VIRTIO_NET_F_GUEST_ECN)  | \
				   (1ULL << VIRTIO_NET_F_GUEST_UFO))

struct virtnet_stat_desc {
	char desc[ETH_GSTRING_LEN];
	size_t offset;
};

struct virtnet_sq_stats {
	struct u64_stats_sync syncp;
	u64 packets;
	u64 bytes;
	u64 xdp_tx;
	u64 xdp_tx_drops;
	u64 kicks;
};

struct virtnet_rq_stats {
	struct u64_stats_sync syncp;
	u64 packets;
	u64 bytes;
	u64 drops;
	u64 xdp_packets;
	u64 xdp_tx;
	u64 xdp_redirects;
	u64 xdp_drops;
	u64 kicks;
};

#define VIRTNET_SQ_STAT(m)	offsetof(struct virtnet_sq_stats, m)
#define VIRTNET_RQ_STAT(m)	offsetof(struct virtnet_rq_stats, m)

static const struct virtnet_stat_desc virtnet_sq_stats_desc[] = {
	{ "packets",		VIRTNET_SQ_STAT(packets) },
	{ "bytes",		VIRTNET_SQ_STAT(bytes) },
	{ "xdp_tx",		VIRTNET_SQ_STAT(xdp_tx) },
	{ "xdp_tx_drops",	VIRTNET_SQ_STAT(xdp_tx_drops) },
	{ "kicks",		VIRTNET_SQ_STAT(kicks) },
};

static const struct virtnet_stat_desc virtnet_rq_stats_desc[] = {
	{ "packets",		VIRTNET_RQ_STAT(packets) },
	{ "bytes",		VIRTNET_RQ_STAT(bytes) },
	{ "drops",		VIRTNET_RQ_STAT(drops) },
	{ "xdp_packets",	VIRTNET_RQ_STAT(xdp_packets) },
	{ "xdp_tx",		VIRTNET_RQ_STAT(xdp_tx) },
	{ "xdp_redirects",	VIRTNET_RQ_STAT(xdp_redirects) },
	{ "xdp_drops",		VIRTNET_RQ_STAT(xdp_drops) },
	{ "kicks",		VIRTNET_RQ_STAT(kicks) },
};

#define VIRTNET_SQ_STATS_LEN	ARRAY_SIZE(virtnet_sq_stats_desc)
#define VIRTNET_RQ_STATS_LEN	ARRAY_SIZE(virtnet_rq_stats_desc)

/* Internal representation of a send virtqueue */
struct send_queue {
	/* Virtqueue associated with this send_queue */
	struct virtqueue *vq;

	/* TX: fragments + linear part + virtio header */
	struct scatterlist sg[MAX_SKB_FRAGS + 2];

	/* Name of the send queue: output.$index */
	char name[40];

	struct virtnet_sq_stats stats;

	struct napi_struct napi;
};

/* Internal representation of a receive virtqueue */
struct receive_queue {
	/* Virtqueue associated with this receive_queue */
	struct virtqueue *vq;

	struct napi_struct napi;

	struct bpf_prog __rcu *xdp_prog;

	struct virtnet_rq_stats stats;

	/* Chain pages by the private ptr. */
	struct page *pages;

	/* Average packet length for mergeable receive buffers. */
	struct ewma_pkt_len mrg_avg_pkt_len;

	/* Page frag for packet buffer allocation. */
	struct page_frag alloc_frag;

	/* RX: fragments + linear part + virtio header */
	struct scatterlist sg[MAX_SKB_FRAGS + 2];

	/* Min single buffer size for mergeable buffers case. */
	unsigned int min_buf_len;

	/* Name of this receive queue: input.$index */
	char name[40];

	struct xdp_rxq_info xdp_rxq;
};

/* Control VQ buffers: protected by the rtnl lock */
struct control_buf {
	struct virtio_net_ctrl_hdr hdr;
	virtio_net_ctrl_ack status;
	struct virtio_net_ctrl_mq mq;
	u8 promisc;
	u8 allmulti;
	__virtio16 vid;
	__virtio64 offloads;
};

struct virtnet_info {
	struct virtio_device *vdev;
	struct virtqueue *cvq;
	struct net_device *dev;
	struct send_queue *sq;
	struct receive_queue *rq;
	unsigned int status;

	/* Max # of queue pairs supported by the device */
	u16 max_queue_pairs;

	/* # of queue pairs currently used by the driver */
	u16 curr_queue_pairs;

	/* # of XDP queue pairs currently used by the driver */
	u16 xdp_queue_pairs;

	/* xdp_queue_pairs may be 0 when XDP is already loaded, so track
	 * XDP enablement separately.
	 */
	bool xdp_enabled;

	/* I like... big packets and I cannot lie! */
	bool big_packets;

	/* Host will merge rx buffers for big packets (shake it! shake it!) */
	bool mergeable_rx_bufs;

	/* Has control virtqueue */
	bool has_cvq;

	/* Host can handle any s/g split between our header and packet data */
	bool any_header_sg;

	/* Packet virtio header size */
	u8 hdr_len;

	/* Work struct for refilling if we run low on memory. */
	struct delayed_work refill;

	/* Work struct for config space updates */
	struct work_struct config_work;

	/* Is the affinity hint set for virtqueues? */
	bool affinity_hint_set;

	/* CPU hotplug instances for online & dead */
	struct hlist_node node;
	struct hlist_node node_dead;

	struct control_buf *ctrl;

	/* Ethtool settings */
	u8 duplex;
	u32 speed;

	unsigned long guest_offloads;
	unsigned long guest_offloads_capable;

	/* failover when STANDBY feature enabled */
	struct failover *failover;
};

struct padded_vnet_hdr {
	struct virtio_net_hdr_mrg_rxbuf hdr;
	/*
	 * hdr is in a separate sg buffer, and data sg buffer shares same page
	 * with this header sg. This padding makes next sg 16 byte aligned
	 * after the header.
	 */
	char padding[4];
};
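/* sizeof(struct virtio_net_hdr_mrg_rxbuf) is 12 bytes (the 10-byte
 * virtio_net_hdr plus a __virtio16 num_buffers), so the 4 bytes of padding
 * bring the following data sg entry to a 16-byte boundary.
 */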

static bool is_xdp_frame(void *ptr)
{
	return (unsigned long)ptr & VIRTIO_XDP_FLAG;
}

static void *xdp_to_ptr(struct xdp_frame *ptr)
{
	return (void *)((unsigned long)ptr | VIRTIO_XDP_FLAG);
}

static struct xdp_frame *ptr_to_xdp(void *ptr)
{
	return (struct xdp_frame *)((unsigned long)ptr & ~VIRTIO_XDP_FLAG);
}
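
/* Illustrative round trip (values not from the original source): an
 * xdp_frame at 0xffff888012345600 is stored as 0xffff888012345601;
 * is_xdp_frame() then sees bit 0 set, and ptr_to_xdp() masks the tag off.
 */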

/* Converting between virtqueue no. and kernel tx/rx queue no.
 * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq
 */
static int vq2txq(struct virtqueue *vq)
{
	return (vq->index - 1) / 2;
}

static int txq2vq(int txq)
{
	return txq * 2 + 1;
}

static int vq2rxq(struct virtqueue *vq)
{
	return vq->index / 2;
}

static int rxq2vq(int rxq)
{
	return rxq * 2;
}
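
/* Worked example of the mapping above: virtqueue 5 is tx queue
 * (5 - 1) / 2 = 2, which maps back via 2 * 2 + 1 = 5; rx queue 2 lives at
 * virtqueue 2 * 2 = 4.
 */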

static inline struct virtio_net_hdr_mrg_rxbuf *skb_vnet_hdr(struct sk_buff *skb)
{
	return (struct virtio_net_hdr_mrg_rxbuf *)skb->cb;
}

/*
 * private is used to chain pages for big packets; put the whole
 * most recently used list at the beginning for reuse.
 */
static void give_pages(struct receive_queue *rq, struct page *page)
{
	struct page *end;

	/* Find end of list, sew whole thing into vi->rq.pages. */
	for (end = page; end->private; end = (struct page *)end->private);
	end->private = (unsigned long)rq->pages;
	rq->pages = page;
}

static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
{
	struct page *p = rq->pages;

	if (p) {
		rq->pages = (struct page *)p->private;
		/* clear private here, it is used to chain pages */
		p->private = 0;
	} else
		p = alloc_page(gfp_mask);
	return p;
}

static void virtqueue_napi_schedule(struct napi_struct *napi,
				    struct virtqueue *vq)
{
	if (napi_schedule_prep(napi)) {
		virtqueue_disable_cb(vq);
		__napi_schedule(napi);
	}
}

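/* Re-enable virtqueue callbacks before completing NAPI. If the opaque token
 * from virtqueue_enable_cb_prepare() shows new buffers arrived in that
 * window (virtqueue_poll() returns true), reschedule rather than risk a
 * missed interrupt.
 */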
static void virtqueue_napi_complete(struct napi_struct *napi,
				    struct virtqueue *vq, int processed)
{
	int opaque;

	opaque = virtqueue_enable_cb_prepare(vq);
	if (napi_complete_done(napi, processed)) {
		if (unlikely(virtqueue_poll(vq, opaque)))
			virtqueue_napi_schedule(napi, vq);
	} else {
		virtqueue_disable_cb(vq);
	}
}

static void skb_xmit_done(struct virtqueue *vq)
{
	struct virtnet_info *vi = vq->vdev->priv;
	struct napi_struct *napi = &vi->sq[vq2txq(vq)].napi;

	/* Suppress further interrupts. */
	virtqueue_disable_cb(vq);

	if (napi->weight)
		virtqueue_napi_schedule(napi, vq);
	else
		/* We were probably waiting for more output buffers. */
		netif_wake_subqueue(vi->dev, vq2txq(vq));
}

#define MRG_CTX_HEADER_SHIFT 22
static void *mergeable_len_to_ctx(unsigned int truesize,
				  unsigned int headroom)
{
	return (void *)(unsigned long)((headroom << MRG_CTX_HEADER_SHIFT) | truesize);
}

static unsigned int mergeable_ctx_to_headroom(void *mrg_ctx)
{
	return (unsigned long)mrg_ctx >> MRG_CTX_HEADER_SHIFT;
}

static unsigned int mergeable_ctx_to_truesize(void *mrg_ctx)
{
	return (unsigned long)mrg_ctx & ((1 << MRG_CTX_HEADER_SHIFT) - 1);
}
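
/* Illustrative encoding (values not from the original source): truesize 1536
 * with headroom 256 packs into (256 << 22) | 1536; the two helpers above
 * recover each field. Truesize must therefore stay below 1 << 22 (4 MiB).
 */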

/* Called from bottom half context */
static struct sk_buff *page_to_skb(struct virtnet_info *vi,
				   struct receive_queue *rq,
				   struct page *page, unsigned int offset,
				   unsigned int len, unsigned int truesize,
				   bool hdr_valid, unsigned int metasize)
{
	struct sk_buff *skb;
	struct virtio_net_hdr_mrg_rxbuf *hdr;
	unsigned int copy, hdr_len, hdr_padded_len;
	char *p;

	p = page_address(page) + offset;

	/* copy small packet so we can reuse these pages for small data */
	skb = napi_alloc_skb(&rq->napi, GOOD_COPY_LEN);
	if (unlikely(!skb))
		return NULL;

	hdr = skb_vnet_hdr(skb);

	hdr_len = vi->hdr_len;
	if (vi->mergeable_rx_bufs)
		hdr_padded_len = sizeof(*hdr);
	else
		hdr_padded_len = sizeof(struct padded_vnet_hdr);

	/* hdr_valid means no XDP, so we can copy the vnet header */
	if (hdr_valid)
		memcpy(hdr, p, hdr_len);

	len -= hdr_len;
	offset += hdr_padded_len;
	p += hdr_padded_len;

	/* Copy the whole frame if it fits in skb->head; otherwise
	 * we let virtio_net_hdr_to_skb() and GRO pull headers as needed.
	 */
	if (len <= skb_tailroom(skb))
		copy = len;
	else
		copy = ETH_HLEN + metasize;
	skb_put_data(skb, p, copy);

	if (metasize) {
		__skb_pull(skb, metasize);
		skb_metadata_set(skb, metasize);
	}

	len -= copy;
	offset += copy;

	if (vi->mergeable_rx_bufs) {
		if (len)
			skb_add_rx_frag(skb, 0, page, offset, len, truesize);
		else
			put_page(page);
		return skb;
	}

	/*
	 * Verify that we can indeed put this data into a skb.
	 * This is here to handle cases when the device erroneously
	 * tries to receive more than is possible. This is usually
	 * the case of a broken device.
	 */
	if (unlikely(len > MAX_SKB_FRAGS * PAGE_SIZE)) {
		net_dbg_ratelimited("%s: too much data\n", skb->dev->name);
		dev_kfree_skb(skb);
		return NULL;
	}
	BUG_ON(offset >= PAGE_SIZE);
	while (len) {
		unsigned int frag_size = min((unsigned)PAGE_SIZE - offset, len);
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset,
				frag_size, truesize);
		len -= frag_size;
		page = (struct page *)page->private;
		offset = 0;
	}

	if (page)
		give_pages(rq, page);

	return skb;
}

static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
				  struct send_queue *sq,
				  struct xdp_frame *xdpf)
{
	struct virtio_net_hdr_mrg_rxbuf *hdr;
	int err;

	if (unlikely(xdpf->headroom < vi->hdr_len))
		return -EOVERFLOW;

	/* Make room for virtqueue hdr (also change xdpf->headroom?) */
	xdpf->data -= vi->hdr_len;
	/* Zero header and leave csum up to XDP layers */
	hdr = xdpf->data;
	memset(hdr, 0, vi->hdr_len);
	xdpf->len += vi->hdr_len;

	sg_init_one(sq->sg, xdpf->data, xdpf->len);

	err = virtqueue_add_outbuf(sq->vq, sq->sg, 1, xdp_to_ptr(xdpf),
				   GFP_ATOMIC);
	if (unlikely(err))
		return -ENOSPC; /* Caller handle free/refcnt */

	return 0;
}

/* When vi->curr_queue_pairs > nr_cpu_ids, the txq/sq is only used for XDP tx
 * on the current cpu, so it does not need to be locked.
 *
 * Here we use a macro instead of inline functions because we have to deal
 * with three issues at the same time: 1. the choice of sq, 2. deciding
 * whether to lock/unlock the txq, and 3. making sparse happy. It is
 * difficult for two inline functions to solve all three problems at once.
 */
#define virtnet_xdp_get_sq(vi) ({                                       \
	struct netdev_queue *txq;                                       \
	typeof(vi) v = (vi);                                            \
	unsigned int qp;                                                \
									\
	if (v->curr_queue_pairs > nr_cpu_ids) {                         \
		qp = v->curr_queue_pairs - v->xdp_queue_pairs;          \
		qp += smp_processor_id();                               \
		txq = netdev_get_tx_queue(v->dev, qp);                  \
		__netif_tx_acquire(txq);                                \
	} else {                                                        \
		qp = smp_processor_id() % v->curr_queue_pairs;          \
		txq = netdev_get_tx_queue(v->dev, qp);                  \
		__netif_tx_lock(txq, raw_smp_processor_id());           \
	}                                                               \
	v->sq + qp;                                                     \
})

#define virtnet_xdp_put_sq(vi, q) {                                     \
	struct netdev_queue *txq;                                       \
	typeof(vi) v = (vi);                                            \
									\
	txq = netdev_get_tx_queue(v->dev, (q) - v->sq);                 \
	if (v->curr_queue_pairs > nr_cpu_ids)                           \
		__netif_tx_release(txq);                                \
	else                                                            \
		__netif_tx_unlock(txq);                                 \
}
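
/* Typical pairing, as in virtnet_xdp_xmit() below:
 *
 *	struct send_queue *sq = virtnet_xdp_get_sq(vi);
 *	...queue frames on sq->vq...
 *	virtnet_xdp_put_sq(vi, sq);
 */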
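/* ndo_xdp_xmit handler: reaps completed TX buffers, then queues up to @n
 * frames; returns the number accepted (n - drops) or a negative errno.
 * Frames that cannot be queued are handed back via
 * xdp_return_frame_rx_napi().
 */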
static int virtnet_xdp_xmit(struct net_device *dev,
			    int n, struct xdp_frame **frames, u32 flags)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct receive_queue *rq = vi->rq;
	struct bpf_prog *xdp_prog;
	struct send_queue *sq;
	unsigned int len;
	int packets = 0;
	int bytes = 0;
	int drops = 0;
	int kicks = 0;
	int ret, err;
	void *ptr;
	int i;

	/* Only allow ndo_xdp_xmit if XDP is loaded on dev, as this
	 * indicates XDP resources have been successfully allocated.
	 */
	xdp_prog = rcu_access_pointer(rq->xdp_prog);
	if (!xdp_prog)
		return -ENXIO;

	sq = virtnet_xdp_get_sq(vi);

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) {
		ret = -EINVAL;
		drops = n;
		goto out;
	}

	/* Free up any pending old buffers before queueing new ones. */
	while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
		if (likely(is_xdp_frame(ptr))) {
			struct xdp_frame *frame = ptr_to_xdp(ptr);

			bytes += frame->len;
			xdp_return_frame(frame);
		} else {
			struct sk_buff *skb = ptr;

			bytes += skb->len;
			napi_consume_skb(skb, false);
		}
		packets++;
	}

	for (i = 0; i < n; i++) {
		struct xdp_frame *xdpf = frames[i];

		err = __virtnet_xdp_xmit_one(vi, sq, xdpf);
		if (err) {
			xdp_return_frame_rx_napi(xdpf);
			drops++;
		}
	}
	ret = n - drops;

	if (flags & XDP_XMIT_FLUSH) {
		if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq))
			kicks = 1;
	}
out:
	u64_stats_update_begin(&sq->stats.syncp);
	sq->stats.bytes += bytes;
	sq->stats.packets += packets;
	sq->stats.xdp_tx += n;
	sq->stats.xdp_tx_drops += drops;
	sq->stats.kicks += kicks;
	u64_stats_update_end(&sq->stats.syncp);

	virtnet_xdp_put_sq(vi, sq);
	return ret;
}

static unsigned int virtnet_get_headroom(struct virtnet_info *vi)
{
	return vi->xdp_enabled ? VIRTIO_XDP_HEADROOM : 0;
}

/* We copy the packet for XDP in the following cases:
 *
 * 1) Packet is scattered across multiple rx buffers.
 * 2) Headroom space is insufficient.
 *
 * This is inefficient but it's a temporary condition that we hit right
 * after XDP is enabled and until queue is refilled with large buffers
 * with sufficient headroom - so it should affect at most queue size packets.
 * Afterwards, the conditions to enable XDP should preclude the underlying
 * device from sending packets across multiple buffers (num_buf > 1), and we
 * make sure buffers have enough headroom.
 */
static struct page *xdp_linearize_page(struct receive_queue *rq,
				       u16 *num_buf,
				       struct page *p,
				       int offset,
				       int page_off,
				       unsigned int *len)
{
	struct page *page = alloc_page(GFP_ATOMIC);

	if (!page)
		return NULL;

	memcpy(page_address(page) + page_off, page_address(p) + offset, *len);
	page_off += *len;

	while (--*num_buf) {
		int tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
		unsigned int buflen;
		void *buf;
		int off;

		buf = virtqueue_get_buf(rq->vq, &buflen);
		if (unlikely(!buf))
			goto err_buf;

		p = virt_to_head_page(buf);
		off = buf - page_address(p);

		/* guard against a misconfigured or uncooperative backend that
		 * is sending packets larger than the MTU.
		 */
		if ((page_off + buflen + tailroom) > PAGE_SIZE) {
			put_page(p);
			goto err_buf;
		}

		memcpy(page_address(page) + page_off,
		       page_address(p) + off, buflen);
		page_off += buflen;
		put_page(p);
	}

	/* Headroom does not contribute to packet length */
	*len = page_off - VIRTIO_XDP_HEADROOM;
	return page;
err_buf:
	__free_pages(page, 0);
	return NULL;
}

static struct sk_buff *receive_small(struct net_device *dev,
				     struct virtnet_info *vi,
				     struct receive_queue *rq,
				     void *buf, void *ctx,
				     unsigned int len,
				     unsigned int *xdp_xmit,
				     struct virtnet_rq_stats *stats)
{
	struct sk_buff *skb;
	struct bpf_prog *xdp_prog;
	unsigned int xdp_headroom = (unsigned long)ctx;
	unsigned int header_offset = VIRTNET_RX_PAD + xdp_headroom;
	unsigned int headroom = vi->hdr_len + header_offset;
	unsigned int buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
			      SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	struct page *page = virt_to_head_page(buf);
	unsigned int delta = 0;
	struct page *xdp_page;
	int err;
	unsigned int metasize = 0;

	len -= vi->hdr_len;
	stats->bytes += len;

	if (unlikely(len > GOOD_PACKET_LEN)) {
		pr_debug("%s: rx error: len %u exceeds max size %d\n",
			 dev->name, len, GOOD_PACKET_LEN);
		dev->stats.rx_length_errors++;
		goto err_len;
	}
	rcu_read_lock();
	xdp_prog = rcu_dereference(rq->xdp_prog);
	if (xdp_prog) {
		struct virtio_net_hdr_mrg_rxbuf *hdr = buf + header_offset;
		struct xdp_frame *xdpf;
		struct xdp_buff xdp;
		void *orig_data;
		u32 act;

		if (unlikely(hdr->hdr.gso_type))
			goto err_xdp;

		if (unlikely(xdp_headroom < virtnet_get_headroom(vi))) {
			int offset = buf - page_address(page) + header_offset;
			unsigned int tlen = len + vi->hdr_len;
			u16 num_buf = 1;

			xdp_headroom = virtnet_get_headroom(vi);
			header_offset = VIRTNET_RX_PAD + xdp_headroom;
			headroom = vi->hdr_len + header_offset;
			buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
				 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
			xdp_page = xdp_linearize_page(rq, &num_buf, page,
						      offset, header_offset,
						      &tlen);
			if (!xdp_page)
				goto err_xdp;

			buf = page_address(xdp_page);
			put_page(page);
			page = xdp_page;
		}

		xdp.data_hard_start = buf + VIRTNET_RX_PAD + vi->hdr_len;
		xdp.data = xdp.data_hard_start + xdp_headroom;
		xdp.data_end = xdp.data + len;
		xdp.data_meta = xdp.data;
		xdp.rxq = &rq->xdp_rxq;
		xdp.frame_sz = buflen;
		orig_data = xdp.data;
		act = bpf_prog_run_xdp(xdp_prog, &xdp);
		stats->xdp_packets++;

		switch (act) {
		case XDP_PASS:
			/* Recalculate length in case bpf program changed it */
			delta = orig_data - xdp.data;
			len = xdp.data_end - xdp.data;
			metasize = xdp.data - xdp.data_meta;
			break;
		case XDP_TX:
			stats->xdp_tx++;
			xdpf = xdp_convert_buff_to_frame(&xdp);
			if (unlikely(!xdpf))
				goto err_xdp;
			err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
			if (unlikely(err < 0)) {
				trace_xdp_exception(vi->dev, xdp_prog, act);
				goto err_xdp;
			}
			*xdp_xmit |= VIRTIO_XDP_TX;
			rcu_read_unlock();
			goto xdp_xmit;
		case XDP_REDIRECT:
			stats->xdp_redirects++;
			err = xdp_do_redirect(dev, &xdp, xdp_prog);
			if (err)
				goto err_xdp;
			*xdp_xmit |= VIRTIO_XDP_REDIR;
			rcu_read_unlock();
			goto xdp_xmit;
		default:
			bpf_warn_invalid_xdp_action(act);
			fallthrough;
		case XDP_ABORTED:
			trace_xdp_exception(vi->dev, xdp_prog, act);
			fallthrough;
		case XDP_DROP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) goto err_xdp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) skb = build_skb(buf, buflen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) if (!skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) put_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) skb_reserve(skb, headroom - delta);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) skb_put(skb, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) if (!xdp_prog) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) buf += header_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) memcpy(skb_vnet_hdr(skb), buf, vi->hdr_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) } /* keep zeroed vnet hdr since XDP is loaded */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) if (metasize)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) skb_metadata_set(skb, metasize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) return skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) err_xdp:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) stats->xdp_drops++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) err_len:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) stats->drops++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) put_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) xdp_xmit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) static struct sk_buff *receive_big(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) struct virtnet_info *vi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) struct receive_queue *rq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) void *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) unsigned int len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) struct virtnet_rq_stats *stats)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) struct page *page = buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) struct sk_buff *skb =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) page_to_skb(vi, rq, page, 0, len, PAGE_SIZE, true, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) stats->bytes += len - vi->hdr_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) if (unlikely(!skb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) return skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) stats->drops++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) give_pages(rq, page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) static struct sk_buff *receive_mergeable(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) struct virtnet_info *vi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) struct receive_queue *rq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) void *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) void *ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) unsigned int len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) unsigned int *xdp_xmit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) struct virtnet_rq_stats *stats)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) u16 num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) struct page *page = virt_to_head_page(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) int offset = buf - page_address(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) struct sk_buff *head_skb, *curr_skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) struct bpf_prog *xdp_prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) unsigned int truesize = mergeable_ctx_to_truesize(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) unsigned int headroom = mergeable_ctx_to_headroom(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) unsigned int metasize = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) unsigned int frame_sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) head_skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) stats->bytes += len - vi->hdr_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) if (unlikely(len > truesize)) {
		pr_debug("%s: rx error: len %u exceeds truesize %u\n",
			 dev->name, len, truesize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) dev->stats.rx_length_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) goto err_skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) xdp_prog = rcu_dereference(rq->xdp_prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) if (xdp_prog) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) struct xdp_frame *xdpf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) struct page *xdp_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) struct xdp_buff xdp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) void *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) u32 act;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) /* Transient failure which in theory could occur if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) * in-flight packets from before XDP was enabled reach
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) * the receive path after XDP is loaded.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) if (unlikely(hdr->hdr.gso_type))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) goto err_xdp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) /* Buffers with headroom use PAGE_SIZE as alloc size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) * see add_recvbuf_mergeable() + get_mergeable_buf_len()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) frame_sz = headroom ? PAGE_SIZE : truesize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885)
		/* This can happen when the rx buffer size was
		 * underestimated, or when there is not enough headroom
		 * because the buffer was refilled before XDP was set.
		 * It should only affect the first several packets, so
		 * we don't care much about its performance.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) if (unlikely(num_buf > 1 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) headroom < virtnet_get_headroom(vi))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) /* linearize data for XDP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) xdp_page = xdp_linearize_page(rq, &num_buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) page, offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) VIRTIO_XDP_HEADROOM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) &len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) frame_sz = PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) if (!xdp_page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) goto err_xdp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) offset = VIRTIO_XDP_HEADROOM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) xdp_page = page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907)
		/* Allow the XDP program to consume the headroom, but
		 * reserve enough space to push the virtio-net header
		 * back on in case of an XDP_TX return code.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) data = page_address(xdp_page) + offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) xdp.data_hard_start = data - VIRTIO_XDP_HEADROOM + vi->hdr_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) xdp.data = data + vi->hdr_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) xdp.data_end = xdp.data + (len - vi->hdr_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) xdp.data_meta = xdp.data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) xdp.rxq = &rq->xdp_rxq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) xdp.frame_sz = frame_sz - vi->hdr_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) act = bpf_prog_run_xdp(xdp_prog, &xdp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) stats->xdp_packets++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) switch (act) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) case XDP_PASS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) metasize = xdp.data - xdp.data_meta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925)
			/* Recalculate the offset to account for any
			 * header adjustments, and subtract metasize so
			 * that page_to_skb() copies the metadata. Note
			 * that the other cases do not build an skb and
			 * do not use offset.
			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) offset = xdp.data - page_address(xdp_page) -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) vi->hdr_len - metasize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) /* recalculate len if xdp.data, xdp.data_end or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) * xdp.data_meta were adjusted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) len = xdp.data_end - xdp.data + vi->hdr_len + metasize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) /* We can only create skb based on xdp_page. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) if (unlikely(xdp_page != page)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) put_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) head_skb = page_to_skb(vi, rq, xdp_page, offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) len, PAGE_SIZE, false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) metasize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) return head_skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) case XDP_TX:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) stats->xdp_tx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) xdpf = xdp_convert_buff_to_frame(&xdp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) if (unlikely(!xdpf))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) goto err_xdp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) if (unlikely(err < 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) trace_xdp_exception(vi->dev, xdp_prog, act);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) if (unlikely(xdp_page != page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) put_page(xdp_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) goto err_xdp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) *xdp_xmit |= VIRTIO_XDP_TX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) if (unlikely(xdp_page != page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) put_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) goto xdp_xmit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) case XDP_REDIRECT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) stats->xdp_redirects++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) err = xdp_do_redirect(dev, &xdp, xdp_prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) if (unlikely(xdp_page != page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) put_page(xdp_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) goto err_xdp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) *xdp_xmit |= VIRTIO_XDP_REDIR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) if (unlikely(xdp_page != page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) put_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) goto xdp_xmit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) bpf_warn_invalid_xdp_action(act);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) case XDP_ABORTED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) trace_xdp_exception(vi->dev, xdp_prog, act);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) case XDP_DROP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) if (unlikely(xdp_page != page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) __free_pages(xdp_page, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) goto err_xdp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) head_skb = page_to_skb(vi, rq, page, offset, len, truesize, !xdp_prog,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) metasize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) curr_skb = head_skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) if (unlikely(!curr_skb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) goto err_skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) while (--num_buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) int num_skb_frags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) if (unlikely(!buf)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) pr_debug("%s: rx error: %d buffers out of %d missing\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) dev->name, num_buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) virtio16_to_cpu(vi->vdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) hdr->num_buffers));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) dev->stats.rx_length_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) goto err_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) stats->bytes += len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) page = virt_to_head_page(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) truesize = mergeable_ctx_to_truesize(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) if (unlikely(len > truesize)) {
			pr_debug("%s: rx error: len %u exceeds truesize %u\n",
				 dev->name, len, truesize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) dev->stats.rx_length_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) goto err_skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) num_skb_frags = skb_shinfo(curr_skb)->nr_frags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) if (unlikely(!nskb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) goto err_skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) if (curr_skb == head_skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) skb_shinfo(curr_skb)->frag_list = nskb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) curr_skb->next = nskb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) curr_skb = nskb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) head_skb->truesize += nskb->truesize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) num_skb_frags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) if (curr_skb != head_skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) head_skb->data_len += len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) head_skb->len += len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) head_skb->truesize += truesize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) offset = buf - page_address(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) put_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) len, truesize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) skb_add_rx_frag(curr_skb, num_skb_frags, page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) offset, len, truesize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) ewma_pkt_len_add(&rq->mrg_avg_pkt_len, head_skb->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) return head_skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) err_xdp:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) stats->xdp_drops++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) err_skb:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) put_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) while (num_buf-- > 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) buf = virtqueue_get_buf(rq->vq, &len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) if (unlikely(!buf)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) pr_debug("%s: rx error: %d buffers missing\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) dev->name, num_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) dev->stats.rx_length_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) stats->bytes += len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) page = virt_to_head_page(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) put_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) err_buf:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) stats->drops++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) dev_kfree_skb(head_skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) xdp_xmit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) void *buf, unsigned int len, void **ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) unsigned int *xdp_xmit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) struct virtnet_rq_stats *stats)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) struct net_device *dev = vi->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) struct virtio_net_hdr_mrg_rxbuf *hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) if (unlikely(len < vi->hdr_len + ETH_HLEN)) {
		pr_debug("%s: short packet %u\n", dev->name, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) dev->stats.rx_length_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) if (vi->mergeable_rx_bufs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) put_page(virt_to_head_page(buf));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) } else if (vi->big_packets) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) give_pages(rq, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) put_page(virt_to_head_page(buf));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) if (vi->mergeable_rx_bufs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) skb = receive_mergeable(dev, vi, rq, buf, ctx, len, xdp_xmit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) stats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) else if (vi->big_packets)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) skb = receive_big(dev, vi, rq, buf, len, stats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) skb = receive_small(dev, vi, rq, buf, ctx, len, xdp_xmit, stats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) if (unlikely(!skb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) hdr = skb_vnet_hdr(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) skb->ip_summed = CHECKSUM_UNNECESSARY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) if (virtio_net_hdr_to_skb(skb, &hdr->hdr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) virtio_is_little_endian(vi->vdev))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) net_warn_ratelimited("%s: bad gso: type: %u, size: %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) dev->name, hdr->hdr.gso_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) hdr->hdr.gso_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) goto frame_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) skb_record_rx_queue(skb, vq2rxq(rq->vq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) skb->protocol = eth_type_trans(skb, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) ntohs(skb->protocol), skb->len, skb->pkt_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) napi_gro_receive(&rq->napi, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) frame_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) dev->stats.rx_frame_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) dev_kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) }
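
/* Checksum note for receive_buf() above: VIRTIO_NET_HDR_F_DATA_VALID means
 * the device has already validated the checksum, which maps to
 * CHECKSUM_UNNECESSARY; the partial-checksum case (VIRTIO_NET_HDR_F_NEEDS_CSUM)
 * is handled generically inside virtio_net_hdr_to_skb().
 */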
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137)
/* Unlike with mergeable buffers, all buffers here are allocated with the
 * same size, except for the headroom. For this reason we do not need to
 * use mergeable_len_to_ctx here - it is enough to store the headroom as
 * the context, ignoring the truesize.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) gfp_t gfp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) struct page_frag *alloc_frag = &rq->alloc_frag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) char *buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) unsigned int xdp_headroom = virtnet_get_headroom(vi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) void *ctx = (void *)(unsigned long)xdp_headroom;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) int len = vi->hdr_len + VIRTNET_RX_PAD + GOOD_PACKET_LEN + xdp_headroom;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) len = SKB_DATA_ALIGN(len) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) if (unlikely(!skb_page_frag_refill(len, alloc_frag, gfp)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) get_page(alloc_frag->page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) alloc_frag->offset += len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) sg_init_one(rq->sg, buf + VIRTNET_RX_PAD + xdp_headroom,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) vi->hdr_len + GOOD_PACKET_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) put_page(virt_to_head_page(buf));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) }
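
/* Layout of the small receive buffer assembled above (a sketch, not to
 * scale; only the bracketed region is handed to the device):
 *
 *	buf
 *	| VIRTNET_RX_PAD | xdp_headroom | [ vnet hdr | GOOD_PACKET_LEN ] |
 *
 * plus SKB_DATA_ALIGN()ed tailroom for the skb_shared_info that
 * build_skb() needs in receive_small().
 */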
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) static int add_recvbuf_big(struct virtnet_info *vi, struct receive_queue *rq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) gfp_t gfp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) struct page *first, *list = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) char *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) int i, err, offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) sg_init_table(rq->sg, MAX_SKB_FRAGS + 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) /* page in rq->sg[MAX_SKB_FRAGS + 1] is list tail */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) for (i = MAX_SKB_FRAGS + 1; i > 1; --i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) first = get_a_page(rq, gfp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) if (!first) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) if (list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) give_pages(rq, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) sg_set_buf(&rq->sg[i], page_address(first), PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) /* chain new page in list head to match sg */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) first->private = (unsigned long)list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) list = first;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) first = get_a_page(rq, gfp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) if (!first) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) give_pages(rq, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) p = page_address(first);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199)
	/* rq->sg[0] and rq->sg[1] share the same page */
	/* a separate rq->sg[0] for the header - required in case !any_header_sg */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) sg_set_buf(&rq->sg[0], p, vi->hdr_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203)
	/* rq->sg[1] for the packet data, starting at offset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) offset = sizeof(struct padded_vnet_hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) sg_set_buf(&rq->sg[1], p + offset, PAGE_SIZE - offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) /* chain first in list head */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) first->private = (unsigned long)list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) err = virtqueue_add_inbuf(rq->vq, rq->sg, MAX_SKB_FRAGS + 2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) first, gfp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) give_pages(rq, first);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) }
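
/* Resulting sg layout for a big-packets buffer (a sketch of the code above):
 *
 *	sg[0]                   vnet header (start of the first page)
 *	sg[1]                   packet data (first page, after the padded hdr)
 *	sg[2..MAX_SKB_FRAGS+1]  one full page each
 *
 * All pages are chained through page->private with "first" at the head,
 * which is the chain give_pages() walks to recycle them on error.
 */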
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) static unsigned int get_mergeable_buf_len(struct receive_queue *rq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) struct ewma_pkt_len *avg_pkt_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) unsigned int room)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) const size_t hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) unsigned int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) if (room)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) return PAGE_SIZE - room;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) len = hdr_len + clamp_t(unsigned int, ewma_pkt_len_read(avg_pkt_len),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) rq->min_buf_len, PAGE_SIZE - hdr_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) return ALIGN(len, L1_CACHE_BYTES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) }
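
/* Worked example for get_mergeable_buf_len() (a sketch, assuming 4K pages,
 * 64-byte cachelines and the 12-byte mergeable vnet header): with no XDP
 * room and an EWMA average of 1500, len = 12 + clamp(1500, min_buf_len,
 * 4084) = 1512, which ALIGN()s up to 1536. With XDP enabled, room != 0 and
 * the buffer simply becomes PAGE_SIZE - room.
 */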
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) static int add_recvbuf_mergeable(struct virtnet_info *vi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) struct receive_queue *rq, gfp_t gfp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) struct page_frag *alloc_frag = &rq->alloc_frag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) unsigned int headroom = virtnet_get_headroom(vi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) unsigned int room = SKB_DATA_ALIGN(headroom + tailroom);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) char *buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) void *ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) unsigned int len, hole;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245)
	/* Extra tailroom is needed to satisfy XDP's assumption. This
	 * means rx frag coalescing won't work, but since GSO is
	 * disabled for XDP it shouldn't be a big issue.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) len = get_mergeable_buf_len(rq, &rq->mrg_avg_pkt_len, room);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) if (unlikely(!skb_page_frag_refill(len + room, alloc_frag, gfp)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) buf += headroom; /* advance address leaving hole at front of pkt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) get_page(alloc_frag->page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) alloc_frag->offset += len + room;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) hole = alloc_frag->size - alloc_frag->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) if (hole < len + room) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) /* To avoid internal fragmentation, if there is very likely not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) * enough space for another buffer, add the remaining space to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) * the current buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) len += hole;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) alloc_frag->offset += hole;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) sg_init_one(rq->sg, buf, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) ctx = mergeable_len_to_ctx(len, headroom);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) put_page(virt_to_head_page(buf));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) }
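
/* A minimal sketch of the context round-trip used above; the exact bit
 * packing lives in mergeable_len_to_ctx() and friends earlier in this
 * file. The point is that truesize and headroom travel inside the
 * pointer-sized ctx instead of a per-buffer metadata struct:
 *
 *	ctx = mergeable_len_to_ctx(len, headroom);
 *	...
 *	truesize = mergeable_ctx_to_truesize(ctx);
 *	headroom = mergeable_ctx_to_headroom(ctx);
 */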
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) * Returns false if we couldn't fill entirely (OOM).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) * Normally run in the receive path, but can also be run from ndo_open
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) * before we're receiving packets, or from refill_work which is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) * careful to disable receiving (using napi_disable).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) gfp_t gfp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) bool oom;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) if (vi->mergeable_rx_bufs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) err = add_recvbuf_mergeable(vi, rq, gfp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) else if (vi->big_packets)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) err = add_recvbuf_big(vi, rq, gfp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) err = add_recvbuf_small(vi, rq, gfp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) oom = err == -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) } while (rq->vq->num_free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) if (virtqueue_kick_prepare(rq->vq) && virtqueue_notify(rq->vq)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) flags = u64_stats_update_begin_irqsave(&rq->stats.syncp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) rq->stats.kicks++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) u64_stats_update_end_irqrestore(&rq->stats.syncp, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) return !oom;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) }
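
/* The kick above is split into virtqueue_kick_prepare() plus
 * virtqueue_notify() so that the (potentially expensive) exit to the
 * hypervisor only happens when the device actually asked to be notified;
 * as a side effect, rq->stats.kicks counts real notifications rather than
 * refill attempts.
 */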
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) static void skb_recv_done(struct virtqueue *rvq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) struct virtnet_info *vi = rvq->vdev->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) struct receive_queue *rq = &vi->rq[vq2rxq(rvq)];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) virtqueue_napi_schedule(&rq->napi, rvq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) static void virtnet_napi_enable(struct virtqueue *vq, struct napi_struct *napi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) napi_enable(napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324)
	/* If all buffers were filled by the other side before we enabled
	 * napi, we won't get another interrupt, so process any outstanding
	 * packets now. local_bh_enable() below triggers the deferred
	 * softIRQ processing.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) local_bh_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) virtqueue_napi_schedule(napi, vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) local_bh_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) static void virtnet_napi_tx_enable(struct virtnet_info *vi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) struct virtqueue *vq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) struct napi_struct *napi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) if (!napi->weight)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) /* Tx napi touches cachelines on the cpu handling tx interrupts. Only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) * enable the feature if this is likely affine with the transmit path.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) if (!vi->affinity_hint_set) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) napi->weight = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348)
	virtnet_napi_enable(vq, napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) static void virtnet_napi_tx_disable(struct napi_struct *napi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) if (napi->weight)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) napi_disable(napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) }
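
/* Throughout this file napi->weight == 0 doubles as the "tx napi is off"
 * flag: virtnet_napi_tx_enable() clears the weight when interrupts are
 * unlikely to be affine with the transmit path, and the disable path
 * above, virtnet_poll_cleantx() and start_xmit() all test it before
 * touching the tx napi machinery.
 */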
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) static void refill_work(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) struct virtnet_info *vi =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) container_of(work, struct virtnet_info, refill.work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) bool still_empty;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) for (i = 0; i < vi->curr_queue_pairs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) struct receive_queue *rq = &vi->rq[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) napi_disable(&rq->napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) still_empty = !try_fill_recv(vi, rq, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) virtnet_napi_enable(rq->vq, &rq->napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371)
		/* In theory this can happen: if we don't get any buffers in,
		 * we will *never* try to fill again.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) if (still_empty)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) schedule_delayed_work(&vi->refill, HZ/2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) static int virtnet_receive(struct receive_queue *rq, int budget,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) unsigned int *xdp_xmit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) struct virtnet_info *vi = rq->vq->vdev->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) struct virtnet_rq_stats stats = {};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) unsigned int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) void *buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) if (!vi->big_packets || vi->mergeable_rx_bufs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) void *ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) while (stats.packets < budget &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) (buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) receive_buf(vi, rq, buf, len, ctx, xdp_xmit, &stats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) stats.packets++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) while (stats.packets < budget &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) receive_buf(vi, rq, buf, len, NULL, xdp_xmit, &stats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) stats.packets++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) if (rq->vq->num_free > min((unsigned int)budget, virtqueue_get_vring_size(rq->vq)) / 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) if (!try_fill_recv(vi, rq, GFP_ATOMIC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) schedule_delayed_work(&vi->refill, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) u64_stats_update_begin(&rq->stats.syncp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) for (i = 0; i < VIRTNET_RQ_STATS_LEN; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) size_t offset = virtnet_rq_stats_desc[i].offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) u64 *item;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) item = (u64 *)((u8 *)&rq->stats + offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) *item += *(u64 *)((u8 *)&stats + offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) u64_stats_update_end(&rq->stats.syncp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) return stats.packets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) }
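
/* The final loop in virtnet_receive() folds the on-stack stats into
 * rq->stats through the virtnet_rq_stats_desc offset table, so adding a
 * new rx counter only needs a struct field plus one table entry, e.g.
 * (a hypothetical entry, following the {name, offset} pattern the loop
 * assumes):
 *
 *	{ "my_counter", offsetof(struct virtnet_rq_stats, my_counter) }
 *
 * and keeps the u64_stats critical section down to one begin/end pair.
 */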
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) unsigned int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) unsigned int packets = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) unsigned int bytes = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) void *ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) if (likely(!is_xdp_frame(ptr))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) struct sk_buff *skb = ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) pr_debug("Sent skb %p\n", skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) bytes += skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) napi_consume_skb(skb, in_napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) struct xdp_frame *frame = ptr_to_xdp(ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) bytes += frame->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) xdp_return_frame(frame);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) packets++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446)
	/* Avoid the overhead of a stats update when no packets have been
	 * processed; this happens when free_old_xmit_skbs() is called
	 * speculatively from start_xmit.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) if (!packets)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) u64_stats_update_begin(&sq->stats.syncp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) sq->stats.bytes += bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) sq->stats.packets += packets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) u64_stats_update_end(&sq->stats.syncp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) else if (q < vi->curr_queue_pairs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) }
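
/* Queue index convention assumed by is_xdp_raw_buffer_queue(): of the
 * curr_queue_pairs active pairs, the last xdp_queue_pairs send queues are
 * reserved for XDP_TX/ndo_xdp_xmit and carry raw xdp_frame pointers
 * (tagged via VIRTIO_XDP_FLAG, so free_old_xmit_skbs() can tell them
 * apart from skbs) - hence "raw buffer" queue.
 */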
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) static void virtnet_poll_cleantx(struct receive_queue *rq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) struct virtnet_info *vi = rq->vq->vdev->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) unsigned int index = vq2rxq(rq->vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) struct send_queue *sq = &vi->sq[index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) if (!sq->napi.weight || is_xdp_raw_buffer_queue(vi, index))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) if (__netif_tx_trylock(txq)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) free_old_xmit_skbs(sq, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) __netif_tx_unlock(txq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) netif_tx_wake_queue(txq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) static int virtnet_poll(struct napi_struct *napi, int budget)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) struct receive_queue *rq =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) container_of(napi, struct receive_queue, napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) struct virtnet_info *vi = rq->vq->vdev->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) struct send_queue *sq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) unsigned int received;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) unsigned int xdp_xmit = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) virtnet_poll_cleantx(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) received = virtnet_receive(rq, budget, &xdp_xmit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) /* Out of packets? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) if (received < budget)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) virtqueue_napi_complete(napi, rq->vq, received);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) if (xdp_xmit & VIRTIO_XDP_REDIR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) xdp_do_flush();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) if (xdp_xmit & VIRTIO_XDP_TX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) sq = virtnet_xdp_get_sq(vi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) u64_stats_update_begin(&sq->stats.syncp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) sq->stats.kicks++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) u64_stats_update_end(&sq->stats.syncp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) virtnet_xdp_put_sq(vi, sq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) return received;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) }
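
/* Batching note for virtnet_poll(): xdp_do_flush() drains the per-cpu
 * bulk queues that xdp_do_redirect() filled during this poll, and the
 * single kick for VIRTIO_XDP_TX notifies the device once per napi run
 * instead of once per XDP_TX packet.
 */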
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) static int virtnet_open(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) struct virtnet_info *vi = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) int i, err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) for (i = 0; i < vi->max_queue_pairs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) if (i < vi->curr_queue_pairs)
			/* Make sure we have some buffers: if OOM, fall back to the refill wq. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) schedule_delayed_work(&vi->refill, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) err = xdp_rxq_info_reg(&vi->rq[i].xdp_rxq, dev, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) err = xdp_rxq_info_reg_mem_model(&vi->rq[i].xdp_rxq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) MEM_TYPE_PAGE_SHARED, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) xdp_rxq_info_unreg(&vi->rq[i].xdp_rxq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) virtnet_napi_tx_enable(vi, vi->sq[i].vq, &vi->sq[i].napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) static int virtnet_poll_tx(struct napi_struct *napi, int budget)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) struct send_queue *sq = container_of(napi, struct send_queue, napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) struct virtnet_info *vi = sq->vq->vdev->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) unsigned int index = vq2txq(sq->vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) struct netdev_queue *txq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) int opaque;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) bool done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) if (unlikely(is_xdp_raw_buffer_queue(vi, index))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) /* We don't need to enable cb for XDP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) napi_complete_done(napi, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) txq = netdev_get_tx_queue(vi->dev, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) __netif_tx_lock(txq, raw_smp_processor_id());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) virtqueue_disable_cb(sq->vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) free_old_xmit_skbs(sq, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) opaque = virtqueue_enable_cb_prepare(sq->vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) done = napi_complete_done(napi, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) if (!done)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) virtqueue_disable_cb(sq->vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) __netif_tx_unlock(txq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) if (done) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) if (unlikely(virtqueue_poll(sq->vq, opaque))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) if (napi_schedule_prep(napi)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) __netif_tx_lock(txq, raw_smp_processor_id());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) virtqueue_disable_cb(sq->vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) __netif_tx_unlock(txq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) __napi_schedule(napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) netif_tx_wake_queue(txq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) }
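
/* The prepare/poll dance in virtnet_poll_tx() closes the race with a tx
 * interrupt firing between the final free_old_xmit_skbs() and napi
 * completion: callbacks are re-armed with virtqueue_enable_cb_prepare(),
 * and if virtqueue_poll() reports that new completions snuck into that
 * window, the napi instance is rescheduled instead of going idle with
 * work pending.
 */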
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) struct virtio_net_hdr_mrg_rxbuf *hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) struct virtnet_info *vi = sq->vq->vdev->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) int num_sg;
	unsigned int hdr_len = vi->hdr_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) bool can_push;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) can_push = vi->any_header_sg &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) !((unsigned long)skb->data & (__alignof__(*hdr) - 1)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) !skb_header_cloned(skb) && skb_headroom(skb) >= hdr_len;
	/* Even if we can, don't push here yet as this would skew
	 * csum_start offset below.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) if (can_push)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) hdr = (struct virtio_net_hdr_mrg_rxbuf *)(skb->data - hdr_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) hdr = skb_vnet_hdr(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) if (virtio_net_hdr_from_skb(skb, &hdr->hdr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) virtio_is_little_endian(vi->vdev), false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) return -EPROTO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) if (vi->mergeable_rx_bufs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) hdr->num_buffers = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624)
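	/* With can_push the pushed header travels through skb_to_sgvec()
	 * together with the data; otherwise the out-of-band header takes
	 * sq->sg[0] and the data starts at sq->sg[1].
	 */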
	sg_init_table(sq->sg, skb_shinfo(skb)->nr_frags + (can_push ? 1 : 2));
	if (can_push) {
		__skb_push(skb, hdr_len);
		num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len);
		if (unlikely(num_sg < 0))
			return num_sg;
		/* Pull header back to avoid skew in tx bytes calculations. */
		__skb_pull(skb, hdr_len);
	} else {
		sg_set_buf(sq->sg, hdr, hdr_len);
		num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len);
		if (unlikely(num_sg < 0))
			return num_sg;
		num_sg++;
	}
	return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC);
}

static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int qnum = skb_get_queue_mapping(skb);
	struct send_queue *sq = &vi->sq[qnum];
	int err;
	struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum);
	bool kick = !netdev_xmit_more();
	bool use_napi = sq->napi.weight;

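	/* kick is clear while the stack still has skbs queued for this
	 * queue (netdev_xmit_more()); the host is kicked at the bottom of
	 * this function only for the last skb of a burst, or if the queue
	 * has been stopped.
	 */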
	/* Free up any pending old buffers before queueing new ones. */
	free_old_xmit_skbs(sq, false);

	if (use_napi && kick)
		virtqueue_enable_cb_delayed(sq->vq);

	/* timestamp packet in software */
	skb_tx_timestamp(skb);

	/* Try to transmit */
	err = xmit_skb(sq, skb);

	/* This should not happen! */
	if (unlikely(err)) {
		dev->stats.tx_fifo_errors++;
		if (net_ratelimit())
			dev_warn(&dev->dev,
				 "Unexpected TXQ (%d) queue failure: %d\n",
				 qnum, err);
		dev->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* Don't wait up for transmitted skbs to be freed. */
	if (!use_napi) {
		skb_orphan(skb);
		nf_reset_ct(skb);
	}

	/* If running out of space, stop queue to avoid getting packets that we
	 * are then unable to transmit.
	 * An alternative would be to force queuing layer to requeue the skb by
	 * returning NETDEV_TX_BUSY. However, NETDEV_TX_BUSY should not be
	 * returned in a normal path of operation: it means that driver is not
	 * maintaining the TX queue stop/start state properly, and causes
	 * the stack to do a non-trivial amount of useless work.
	 * Since most packets only take 1 or 2 ring slots, stopping the queue
	 * early means 16 slots are typically wasted.
	 */
	if (sq->vq->num_free < 2 + MAX_SKB_FRAGS) {
		netif_stop_subqueue(dev, qnum);
		if (!use_napi &&
		    unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
			/* More just got used, free them then recheck. */
			free_old_xmit_skbs(sq, false);
			if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) {
				netif_start_subqueue(dev, qnum);
				virtqueue_disable_cb(sq->vq);
			}
		}
	}

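	/* virtqueue_kick_prepare() returns false when the device has
	 * suppressed notifications, so the kicks counter only reflects
	 * notifications that were actually delivered.
	 */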
	if (kick || netif_xmit_stopped(txq)) {
		if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
			u64_stats_update_begin(&sq->stats.syncp);
			sq->stats.kicks++;
			u64_stats_update_end(&sq->stats.syncp);
		}
	}

	return NETDEV_TX_OK;
}

/*
 * Send command via the control virtqueue and check status.  Commands
 * supported by the hypervisor, as indicated by feature bits, should
 * never fail unless improperly formatted.
 */
static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
				 struct scatterlist *out)
{
	struct scatterlist *sgs[4], hdr, stat;
	unsigned int out_num = 0, tmp;

	/* Caller should know better */
	BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ));

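	/* Request layout on the control vq: sgs[0] is the class/command
	 * header, sgs[1] the optional driver-to-device payload, and the
	 * final entry a single device-writable status byte.
	 */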
	vi->ctrl->status = ~0;
	vi->ctrl->hdr.class = class;
	vi->ctrl->hdr.cmd = cmd;
	/* Add header */
	sg_init_one(&hdr, &vi->ctrl->hdr, sizeof(vi->ctrl->hdr));
	sgs[out_num++] = &hdr;

	if (out)
		sgs[out_num++] = out;

	/* Add return status. */
	sg_init_one(&stat, &vi->ctrl->status, sizeof(vi->ctrl->status));
	sgs[out_num] = &stat;

	BUG_ON(out_num + 1 > ARRAY_SIZE(sgs));
	/* If the buffers can't be queued at all, don't spin below waiting
	 * for a response that will never come.
	 */
	if (virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC) < 0) {
		dev_warn(&vi->vdev->dev, "Failed to add sgs to control vq\n");
		return false;
	}

	if (unlikely(!virtqueue_kick(vi->cvq)))
		return vi->ctrl->status == VIRTIO_NET_OK;

	/* Spin for a response; the kick causes an ioport write, trapping
	 * into the hypervisor, so the request should be handled immediately.
	 */
	while (!virtqueue_get_buf(vi->cvq, &tmp) &&
	       !virtqueue_is_broken(vi->cvq))
		cpu_relax();

	return vi->ctrl->status == VIRTIO_NET_OK;
}

static int virtnet_set_mac_address(struct net_device *dev, void *p)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct virtio_device *vdev = vi->vdev;
	int ret;
	struct sockaddr *addr;
	struct scatterlist sg;

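	/* With the standby (failover) feature the MAC must stay in sync
	 * with the paired primary device, so changing it here is not
	 * supported.
	 */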
	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STANDBY))
		return -EOPNOTSUPP;

	addr = kmemdup(p, sizeof(*addr), GFP_KERNEL);
	if (!addr)
		return -ENOMEM;

	ret = eth_prepare_mac_addr_change(dev, addr);
	if (ret)
		goto out;

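	/* Prefer the VIRTIO_NET_CTRL_MAC_ADDR_SET command, which updates
	 * the address atomically; legacy devices only expose the config
	 * space field, which has to be written a byte at a time.
	 */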
	if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
		sg_init_one(&sg, addr->sa_data, dev->addr_len);
		if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
					  VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg)) {
			dev_warn(&vdev->dev,
				 "Failed to set MAC address by vq command.\n");
			ret = -EINVAL;
			goto out;
		}
	} else if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC) &&
		   !virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) {
		unsigned int i;

		/* Naturally, this has an atomicity problem. */
		for (i = 0; i < dev->addr_len; i++)
			virtio_cwrite8(vdev,
				       offsetof(struct virtio_net_config, mac) +
				       i, addr->sa_data[i]);
	}

	eth_commit_mac_addr_change(dev, p);
	ret = 0;

out:
	kfree(addr);
	return ret;
}

static void virtnet_stats(struct net_device *dev,
			  struct rtnl_link_stats64 *tot)
{
	struct virtnet_info *vi = netdev_priv(dev);
	unsigned int start;
	int i;

	for (i = 0; i < vi->max_queue_pairs; i++) {
		u64 tpackets, tbytes, rpackets, rbytes, rdrops;
		struct receive_queue *rq = &vi->rq[i];
		struct send_queue *sq = &vi->sq[i];

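		/* u64_stats seqcount loops: retry the snapshot if a writer
		 * updated the counters meanwhile (this only ever spins on
		 * 32-bit SMP, where 64-bit counters aren't read atomically).
		 */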
		do {
			start = u64_stats_fetch_begin_irq(&sq->stats.syncp);
			tpackets = sq->stats.packets;
			tbytes = sq->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&sq->stats.syncp, start));

		do {
			start = u64_stats_fetch_begin_irq(&rq->stats.syncp);
			rpackets = rq->stats.packets;
			rbytes = rq->stats.bytes;
			rdrops = rq->stats.drops;
		} while (u64_stats_fetch_retry_irq(&rq->stats.syncp, start));

		tot->rx_packets += rpackets;
		tot->tx_packets += tpackets;
		tot->rx_bytes += rbytes;
		tot->tx_bytes += tbytes;
		tot->rx_dropped += rdrops;
	}

	tot->tx_dropped = dev->stats.tx_dropped;
	tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
	tot->rx_length_errors = dev->stats.rx_length_errors;
	tot->rx_frame_errors = dev->stats.rx_frame_errors;
}

static void virtnet_ack_link_announce(struct virtnet_info *vi)
{
	rtnl_lock();
	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE,
				  VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL))
		dev_warn(&vi->dev->dev, "Failed to ack link announce.\n");
	rtnl_unlock();
}

static int _virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
{
	struct scatterlist sg;
	struct net_device *dev = vi->dev;

	if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ))
		return 0;

	vi->ctrl->mq.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs);
	sg_init_one(&sg, &vi->ctrl->mq, sizeof(vi->ctrl->mq));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
				  VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg)) {
		dev_warn(&dev->dev, "Failed to set the number of queue pairs to %d\n",
			 queue_pairs);
		return -EINVAL;
	} else {
		vi->curr_queue_pairs = queue_pairs;
		/* virtnet_open() will refill when the device is brought up. */
		if (dev->flags & IFF_UP)
			schedule_delayed_work(&vi->refill, 0);
	}

	return 0;
}

static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
{
	int err;

	rtnl_lock();
	err = _virtnet_set_queues(vi, queue_pairs);
	rtnl_unlock();
	return err;
}

static int virtnet_close(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int i;

	/* Make sure refill_work doesn't re-enable napi! */
	cancel_delayed_work_sync(&vi->refill);

	for (i = 0; i < vi->max_queue_pairs; i++) {
		xdp_rxq_info_unreg(&vi->rq[i].xdp_rxq);
		napi_disable(&vi->rq[i].napi);
		virtnet_napi_tx_disable(&vi->sq[i].napi);
	}

	return 0;
}

static void virtnet_set_rx_mode(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct scatterlist sg[2];
	struct virtio_net_ctrl_mac *mac_data;
	struct netdev_hw_addr *ha;
	int uc_count;
	int mc_count;
	void *buf;
	int i;

	/* We can't dynamically set ndo_set_rx_mode, so return gracefully */
	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
		return;

	vi->ctrl->promisc = ((dev->flags & IFF_PROMISC) != 0);
	vi->ctrl->allmulti = ((dev->flags & IFF_ALLMULTI) != 0);

	sg_init_one(sg, &vi->ctrl->promisc, sizeof(vi->ctrl->promisc));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
				  VIRTIO_NET_CTRL_RX_PROMISC, sg))
		dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
			 vi->ctrl->promisc ? "en" : "dis");

	sg_init_one(sg, &vi->ctrl->allmulti, sizeof(vi->ctrl->allmulti));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
				  VIRTIO_NET_CTRL_RX_ALLMULTI, sg))
		dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
			 vi->ctrl->allmulti ? "en" : "dis");

	uc_count = netdev_uc_count(dev);
	mc_count = netdev_mc_count(dev);
	/* MAC filter - use one buffer for both lists */
	buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) +
		      (2 * sizeof(mac_data->entries)), GFP_ATOMIC);
	mac_data = buf;
	if (!buf)
		return;

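	/* The single buffer holds two variable-length tables back to back:
	 * a unicast count plus addresses, then a multicast count plus
	 * addresses, passed to the device as two scatterlist entries.
	 */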
	sg_init_table(sg, 2);

	/* Store the unicast list and count in the front of the buffer */
	mac_data->entries = cpu_to_virtio32(vi->vdev, uc_count);
	i = 0;
	netdev_for_each_uc_addr(ha, dev)
		memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);

	sg_set_buf(&sg[0], mac_data,
		   sizeof(mac_data->entries) + (uc_count * ETH_ALEN));

	/* multicast list and count fill the end */
	mac_data = (void *)&mac_data->macs[uc_count][0];

	mac_data->entries = cpu_to_virtio32(vi->vdev, mc_count);
	i = 0;
	netdev_for_each_mc_addr(ha, dev)
		memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);

	sg_set_buf(&sg[1], mac_data,
		   sizeof(mac_data->entries) + (mc_count * ETH_ALEN));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
				  VIRTIO_NET_CTRL_MAC_TABLE_SET, sg))
		dev_warn(&dev->dev, "Failed to set MAC filter table.\n");

	kfree(buf);
}

static int virtnet_vlan_rx_add_vid(struct net_device *dev,
				   __be16 proto, u16 vid)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct scatterlist sg;

	vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid);
	sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
				  VIRTIO_NET_CTRL_VLAN_ADD, &sg))
		dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid);
	return 0;
}

static int virtnet_vlan_rx_kill_vid(struct net_device *dev,
				    __be16 proto, u16 vid)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct scatterlist sg;

	vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid);
	sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
				  VIRTIO_NET_CTRL_VLAN_DEL, &sg))
		dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid);
	return 0;
}

static void virtnet_clean_affinity(struct virtnet_info *vi)
{
	int i;

	if (vi->affinity_hint_set) {
		for (i = 0; i < vi->max_queue_pairs; i++) {
			virtqueue_set_affinity(vi->rq[i].vq, NULL);
			virtqueue_set_affinity(vi->sq[i].vq, NULL);
		}

		vi->affinity_hint_set = false;
	}
}

static void virtnet_set_affinity(struct virtnet_info *vi)
{
	cpumask_var_t mask;
	int stragglers;
	int group_size;
	int i, j, cpu;
	int num_cpu;
	int stride;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
		virtnet_clean_affinity(vi);
		return;
	}

	num_cpu = num_online_cpus();
	stride = max_t(int, num_cpu / vi->curr_queue_pairs, 1);
	stragglers = num_cpu >= vi->curr_queue_pairs ?
		num_cpu % vi->curr_queue_pairs :
		0;
	cpu = cpumask_next(-1, cpu_online_mask);

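	/* Hand out online CPUs to queue pairs in contiguous groups of
	 * 'stride' CPUs, with the first 'stragglers' queues taking one
	 * extra CPU each; the same mask doubles as the XPS map.
	 */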
	for (i = 0; i < vi->curr_queue_pairs; i++) {
		group_size = stride + (i < stragglers ? 1 : 0);

		for (j = 0; j < group_size; j++) {
			cpumask_set_cpu(cpu, mask);
			cpu = cpumask_next_wrap(cpu, cpu_online_mask,
						nr_cpu_ids, false);
		}
		virtqueue_set_affinity(vi->rq[i].vq, mask);
		virtqueue_set_affinity(vi->sq[i].vq, mask);
		__netif_set_xps_queue(vi->dev, cpumask_bits(mask), i, false);
		cpumask_clear(mask);
	}

	vi->affinity_hint_set = true;
	free_cpumask_var(mask);
}

static int virtnet_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
						   node);
	virtnet_set_affinity(vi);
	return 0;
}

static int virtnet_cpu_dead(unsigned int cpu, struct hlist_node *node)
{
	struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
						   node_dead);
	virtnet_set_affinity(vi);
	return 0;
}

static int virtnet_cpu_down_prep(unsigned int cpu, struct hlist_node *node)
{
	struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
						   node);

	virtnet_clean_affinity(vi);
	return 0;
}

static enum cpuhp_state virtionet_online;

static int virtnet_cpu_notif_add(struct virtnet_info *vi)
{
	int ret;

	ret = cpuhp_state_add_instance_nocalls(virtionet_online, &vi->node);
	if (ret)
		return ret;
	ret = cpuhp_state_add_instance_nocalls(CPUHP_VIRT_NET_DEAD,
					       &vi->node_dead);
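	/* Both hotplug instances registered: done. Otherwise unwind the
	 * online-state instance before returning the error.
	 */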
	if (!ret)
		return ret;
	cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node);
	return ret;
}

static void virtnet_cpu_notif_remove(struct virtnet_info *vi)
{
	cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node);
	cpuhp_state_remove_instance_nocalls(CPUHP_VIRT_NET_DEAD,
					    &vi->node_dead);
}

static void virtnet_get_ringparam(struct net_device *dev,
				  struct ethtool_ringparam *ring)
{
	struct virtnet_info *vi = netdev_priv(dev);

	ring->rx_max_pending = virtqueue_get_vring_size(vi->rq[0].vq);
	ring->tx_max_pending = virtqueue_get_vring_size(vi->sq[0].vq);
	ring->rx_pending = ring->rx_max_pending;
	ring->tx_pending = ring->tx_max_pending;
}

static void virtnet_get_drvinfo(struct net_device *dev,
				struct ethtool_drvinfo *info)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct virtio_device *vdev = vi->vdev;

	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->version, VIRTNET_DRIVER_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, virtio_bus_name(vdev), sizeof(info->bus_info));
}

/* TODO: Eliminate OOO packets during switching */
static int virtnet_set_channels(struct net_device *dev,
				struct ethtool_channels *channels)
{
	struct virtnet_info *vi = netdev_priv(dev);
	u16 queue_pairs = channels->combined_count;
	int err;

	/* We don't support separate rx/tx channels.
	 * We don't allow setting 'other' channels.
	 */
	if (channels->rx_count || channels->tx_count || channels->other_count)
		return -EINVAL;

	if (queue_pairs > vi->max_queue_pairs || queue_pairs == 0)
		return -EINVAL;

	/* For now we don't support modifying channels while XDP is loaded.
	 * Also, when XDP is loaded all RX queues have XDP programs, so we
	 * only need to check a single RX queue.
	 */
	if (vi->rq[0].xdp_prog)
		return -EINVAL;

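	/* Hold the CPU hotplug read lock so the online mask used by
	 * virtnet_set_affinity() stays stable across the update.
	 */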
	get_online_cpus();
	err = _virtnet_set_queues(vi, queue_pairs);
	if (err) {
		put_online_cpus();
		goto err;
	}
	virtnet_set_affinity(vi);
	put_online_cpus();

	netif_set_real_num_tx_queues(dev, queue_pairs);
	netif_set_real_num_rx_queues(dev, queue_pairs);
err:
	return err;
}

static void virtnet_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	struct virtnet_info *vi = netdev_priv(dev);
	char *p = (char *)data;
	unsigned int i, j;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < vi->curr_queue_pairs; i++) {
			for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++) {
				snprintf(p, ETH_GSTRING_LEN, "rx_queue_%u_%s",
					 i, virtnet_rq_stats_desc[j].desc);
				p += ETH_GSTRING_LEN;
			}
		}

		for (i = 0; i < vi->curr_queue_pairs; i++) {
			for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++) {
				snprintf(p, ETH_GSTRING_LEN, "tx_queue_%u_%s",
					 i, virtnet_sq_stats_desc[j].desc);
				p += ETH_GSTRING_LEN;
			}
		}
		break;
	}
}

static int virtnet_get_sset_count(struct net_device *dev, int sset)
{
	struct virtnet_info *vi = netdev_priv(dev);

	switch (sset) {
	case ETH_SS_STATS:
		return vi->curr_queue_pairs * (VIRTNET_RQ_STATS_LEN +
					       VIRTNET_SQ_STATS_LEN);
	default:
		return -EOPNOTSUPP;
	}
}

static void virtnet_get_ethtool_stats(struct net_device *dev,
				      struct ethtool_stats *stats, u64 *data)
{
	struct virtnet_info *vi = netdev_priv(dev);
	unsigned int idx = 0, start, i, j;
	const u8 *stats_base;
	size_t offset;

	for (i = 0; i < vi->curr_queue_pairs; i++) {
		struct receive_queue *rq = &vi->rq[i];

		stats_base = (u8 *)&rq->stats;
		do {
			start = u64_stats_fetch_begin_irq(&rq->stats.syncp);
			for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++) {
				offset = virtnet_rq_stats_desc[j].offset;
				data[idx + j] = *(u64 *)(stats_base + offset);
			}
		} while (u64_stats_fetch_retry_irq(&rq->stats.syncp, start));
		idx += VIRTNET_RQ_STATS_LEN;
	}

	for (i = 0; i < vi->curr_queue_pairs; i++) {
		struct send_queue *sq = &vi->sq[i];

		stats_base = (u8 *)&sq->stats;
		do {
			start = u64_stats_fetch_begin_irq(&sq->stats.syncp);
			for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++) {
				offset = virtnet_sq_stats_desc[j].offset;
				data[idx + j] = *(u64 *)(stats_base + offset);
			}
		} while (u64_stats_fetch_retry_irq(&sq->stats.syncp, start));
		idx += VIRTNET_SQ_STATS_LEN;
	}
}

static void virtnet_get_channels(struct net_device *dev,
				 struct ethtool_channels *channels)
{
	struct virtnet_info *vi = netdev_priv(dev);

	channels->combined_count = vi->curr_queue_pairs;
	channels->max_combined = vi->max_queue_pairs;
	channels->max_other = 0;
	channels->rx_count = 0;
	channels->tx_count = 0;
	channels->other_count = 0;
}

static int virtnet_set_link_ksettings(struct net_device *dev,
				      const struct ethtool_link_ksettings *cmd)
{
	struct virtnet_info *vi = netdev_priv(dev);

	return ethtool_virtdev_set_link_ksettings(dev, cmd,
						  &vi->speed, &vi->duplex);
}

static int virtnet_get_link_ksettings(struct net_device *dev,
				      struct ethtool_link_ksettings *cmd)
{
	struct virtnet_info *vi = netdev_priv(dev);

	cmd->base.speed = vi->speed;
	cmd->base.duplex = vi->duplex;
	cmd->base.port = PORT_OTHER;

	return 0;
}

static int virtnet_set_coalesce(struct net_device *dev,
				struct ethtool_coalesce *ec)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int i, napi_weight;

	if (ec->tx_max_coalesced_frames > 1 ||
	    ec->rx_max_coalesced_frames != 1)
		return -EINVAL;

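	/* There are no real coalescing knobs; tx_max_coalesced_frames is
	 * overloaded as a TX NAPI switch (1 = on, 0 = off), and flipping
	 * it is only allowed while the interface is down.
	 */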
	napi_weight = ec->tx_max_coalesced_frames ? NAPI_POLL_WEIGHT : 0;
	if (napi_weight ^ vi->sq[0].napi.weight) {
		if (dev->flags & IFF_UP)
			return -EBUSY;
		for (i = 0; i < vi->max_queue_pairs; i++)
			vi->sq[i].napi.weight = napi_weight;
	}

	return 0;
}

static int virtnet_get_coalesce(struct net_device *dev,
				struct ethtool_coalesce *ec)
{
	struct ethtool_coalesce ec_default = {
		.cmd = ETHTOOL_GCOALESCE,
		.rx_max_coalesced_frames = 1,
	};
	struct virtnet_info *vi = netdev_priv(dev);

	memcpy(ec, &ec_default, sizeof(ec_default));

	if (vi->sq[0].napi.weight)
		ec->tx_max_coalesced_frames = 1;

	return 0;
}

static void virtnet_init_settings(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);

	vi->speed = SPEED_UNKNOWN;
	vi->duplex = DUPLEX_UNKNOWN;
}

static void virtnet_update_settings(struct virtnet_info *vi)
{
	u32 speed;
	u8 duplex;

	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_SPEED_DUPLEX))
		return;

	virtio_cread_le(vi->vdev, struct virtio_net_config, speed, &speed);

	if (ethtool_validate_speed(speed))
		vi->speed = speed;

	virtio_cread_le(vi->vdev, struct virtio_net_config, duplex, &duplex);

	if (ethtool_validate_duplex(duplex))
		vi->duplex = duplex;
}

static const struct ethtool_ops virtnet_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES,
	.get_drvinfo = virtnet_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_ringparam = virtnet_get_ringparam,
	.get_strings = virtnet_get_strings,
	.get_sset_count = virtnet_get_sset_count,
	.get_ethtool_stats = virtnet_get_ethtool_stats,
	.set_channels = virtnet_set_channels,
	.get_channels = virtnet_get_channels,
	.get_ts_info = ethtool_op_get_ts_info,
	.get_link_ksettings = virtnet_get_link_ksettings,
	.set_link_ksettings = virtnet_set_link_ksettings,
	.set_coalesce = virtnet_set_coalesce,
	.get_coalesce = virtnet_get_coalesce,
};

static void virtnet_freeze_down(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;
	int i;

	/* Make sure no work handler is accessing the device */
	flush_work(&vi->config_work);

	netif_tx_lock_bh(vi->dev);
	netif_device_detach(vi->dev);
	netif_tx_unlock_bh(vi->dev);
	cancel_delayed_work_sync(&vi->refill);

	if (netif_running(vi->dev)) {
		for (i = 0; i < vi->max_queue_pairs; i++) {
			napi_disable(&vi->rq[i].napi);
			virtnet_napi_tx_disable(&vi->sq[i].napi);
		}
	}
}

static int init_vqs(struct virtnet_info *vi);

static int virtnet_restore_up(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;
	int err, i;

	err = init_vqs(vi);
	if (err)
		return err;

	virtio_device_ready(vdev);

	if (netif_running(vi->dev)) {
		for (i = 0; i < vi->curr_queue_pairs; i++)
			if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
				schedule_delayed_work(&vi->refill, 0);

		for (i = 0; i < vi->max_queue_pairs; i++) {
			virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
			virtnet_napi_tx_enable(vi, vi->sq[i].vq,
					       &vi->sq[i].napi);
		}
	}

	netif_tx_lock_bh(vi->dev);
	netif_device_attach(vi->dev);
	netif_tx_unlock_bh(vi->dev);
	return err;
}

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) static int virtnet_set_guest_offloads(struct virtnet_info *vi, u64 offloads)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) struct scatterlist sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) vi->ctrl->offloads = cpu_to_virtio64(vi->vdev, offloads);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) sg_init_one(&sg, &vi->ctrl->offloads, sizeof(vi->ctrl->offloads));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_GUEST_OFFLOADS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET, &sg)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) dev_warn(&vi->dev->dev, "Failed to set guest offloads.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) static int virtnet_clear_guest_offloads(struct virtnet_info *vi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) u64 offloads = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) if (!vi->guest_offloads)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) return virtnet_set_guest_offloads(vi, offloads);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) static int virtnet_restore_guest_offloads(struct virtnet_info *vi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) u64 offloads = vi->guest_offloads;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) if (!vi->guest_offloads)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) return virtnet_set_guest_offloads(vi, offloads);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) struct netlink_ext_ack *extack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) unsigned long max_sz = PAGE_SIZE - sizeof(struct padded_vnet_hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) struct virtnet_info *vi = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) struct bpf_prog *old_prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) u16 xdp_qp = 0, curr_qp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) int i, err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) && (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) NL_SET_ERR_MSG_MOD(extack, "Can't set XDP while host is implementing GRO_HW/CSUM, disable GRO_HW/CSUM first");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) if (vi->mergeable_rx_bufs && !vi->any_header_sg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) NL_SET_ERR_MSG_MOD(extack, "XDP expects header/data in single page, any_header_sg required");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) if (dev->mtu > max_sz) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) NL_SET_ERR_MSG_MOD(extack, "MTU too large to enable XDP");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) netdev_warn(dev, "XDP requires MTU less than %lu\n", max_sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) curr_qp = vi->curr_queue_pairs - vi->xdp_queue_pairs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) if (prog)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) xdp_qp = nr_cpu_ids;
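/* One extra TX queue per possible CPU lets each CPU own an XDP_TX
 * queue, so XDP transmissions need no locking; the fallback below
 * (xdp_qp = 0) shares the regular queues in a slower, locked mode.
 */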
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) /* XDP requires extra queues for XDP_TX */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) if (curr_qp + xdp_qp > vi->max_queue_pairs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) netdev_warn(dev, "XDP requests %i queues but max is %i. XDP_TX and XDP_REDIRECT will operate in a slower locked tx mode.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) curr_qp + xdp_qp, vi->max_queue_pairs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) xdp_qp = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) old_prog = rtnl_dereference(vi->rq[0].xdp_prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) if (!prog && !old_prog)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) if (prog)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) bpf_prog_add(prog, vi->max_queue_pairs - 1);
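/* The caller already holds one reference on prog; take
 * max_queue_pairs - 1 more so that every RX queue's xdp_prog
 * pointer owns a reference of its own.
 */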
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) /* Make sure NAPI is not using any XDP TX queues for RX. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) if (netif_running(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) for (i = 0; i < vi->max_queue_pairs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) napi_disable(&vi->rq[i].napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) virtnet_napi_tx_disable(&vi->sq[i].napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) if (!prog) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) for (i = 0; i < vi->max_queue_pairs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) if (i == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) virtnet_restore_guest_offloads(vi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) synchronize_net();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) }
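/* When removing the program, synchronize_net() above waits an RCU
 * grace period so no CPU can still be executing the old XDP program
 * when the queues are reconfigured below.
 */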
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) err = _virtnet_set_queues(vi, curr_qp + xdp_qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) vi->xdp_queue_pairs = xdp_qp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) if (prog) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) vi->xdp_enabled = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) for (i = 0; i < vi->max_queue_pairs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) if (i == 0 && !old_prog)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) virtnet_clear_guest_offloads(vi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) vi->xdp_enabled = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) for (i = 0; i < vi->max_queue_pairs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) if (old_prog)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) bpf_prog_put(old_prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) if (netif_running(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) virtnet_napi_tx_enable(vi, vi->sq[i].vq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) &vi->sq[i].napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) if (!prog) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) virtnet_clear_guest_offloads(vi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) for (i = 0; i < vi->max_queue_pairs; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) rcu_assign_pointer(vi->rq[i].xdp_prog, old_prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) if (netif_running(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) for (i = 0; i < vi->max_queue_pairs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) virtnet_napi_tx_enable(vi, vi->sq[i].vq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) &vi->sq[i].napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) if (prog)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) bpf_prog_sub(prog, vi->max_queue_pairs - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) static int virtnet_xdp(struct net_device *dev, struct netdev_bpf *xdp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) switch (xdp->command) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) case XDP_SETUP_PROG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) return virtnet_xdp_set(dev, xdp->prog, xdp->extack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) static int virtnet_get_phys_port_name(struct net_device *dev, char *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) size_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) struct virtnet_info *vi = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_STANDBY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) ret = snprintf(buf, len, "sby");
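/* snprintf() returns the length it would have written, so a return
 * value >= len means "sby" was truncated and cannot be reported.
 */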
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) if (ret >= len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) static int virtnet_set_features(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) netdev_features_t features)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) struct virtnet_info *vi = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) u64 offloads;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) if (!vi->has_cvq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) if ((dev->features ^ features) & NETIF_F_GRO_HW) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) if (vi->xdp_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) if (features & NETIF_F_GRO_HW)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) offloads = vi->guest_offloads_capable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) offloads = vi->guest_offloads_capable &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) ~GUEST_OFFLOAD_GRO_HW_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) err = virtnet_set_guest_offloads(vi, offloads);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) vi->guest_offloads = offloads;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) static const struct net_device_ops virtnet_netdev = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) .ndo_open = virtnet_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) .ndo_stop = virtnet_close,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) .ndo_start_xmit = start_xmit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) .ndo_validate_addr = eth_validate_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) .ndo_set_mac_address = virtnet_set_mac_address,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) .ndo_set_rx_mode = virtnet_set_rx_mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) .ndo_get_stats64 = virtnet_stats,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) .ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) .ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) .ndo_bpf = virtnet_xdp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) .ndo_xdp_xmit = virtnet_xdp_xmit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) .ndo_features_check = passthru_features_check,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) .ndo_get_phys_port_name = virtnet_get_phys_port_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) .ndo_set_features = virtnet_set_features,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) static void virtnet_config_changed_work(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) struct virtnet_info *vi =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) container_of(work, struct virtnet_info, config_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) u16 v;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) if (virtio_cread_feature(vi->vdev, VIRTIO_NET_F_STATUS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) struct virtio_net_config, status, &v) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) if (v & VIRTIO_NET_S_ANNOUNCE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) netdev_notify_peers(vi->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) virtnet_ack_link_announce(vi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) /* Ignore unknown (future) status bits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) v &= VIRTIO_NET_S_LINK_UP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) if (vi->status == v)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) vi->status = v;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) if (vi->status & VIRTIO_NET_S_LINK_UP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) virtnet_update_settings(vi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) netif_carrier_on(vi->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) netif_tx_wake_all_queues(vi->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) netif_carrier_off(vi->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) netif_tx_stop_all_queues(vi->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) static void virtnet_config_changed(struct virtio_device *vdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) struct virtnet_info *vi = vdev->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) schedule_work(&vi->config_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) static void virtnet_free_queues(struct virtnet_info *vi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) for (i = 0; i < vi->max_queue_pairs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) __netif_napi_del(&vi->rq[i].napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) __netif_napi_del(&vi->sq[i].napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) /* We called __netif_napi_del(), so we must respect an RCU grace
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) * period before freeing vi->rq and vi->sq.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) synchronize_net();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) kfree(vi->rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) kfree(vi->sq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) kfree(vi->ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) static void _free_receive_bufs(struct virtnet_info *vi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) struct bpf_prog *old_prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) for (i = 0; i < vi->max_queue_pairs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) while (vi->rq[i].pages)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) __free_pages(get_a_page(&vi->rq[i], GFP_KERNEL), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) old_prog = rtnl_dereference(vi->rq[i].xdp_prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) RCU_INIT_POINTER(vi->rq[i].xdp_prog, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) if (old_prog)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) bpf_prog_put(old_prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) static void free_receive_bufs(struct virtnet_info *vi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) rtnl_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) _free_receive_bufs(vi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) static void free_receive_page_frags(struct virtnet_info *vi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) for (i = 0; i < vi->max_queue_pairs; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) if (vi->rq[i].alloc_frag.page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) put_page(vi->rq[i].alloc_frag.page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) static void free_unused_bufs(struct virtnet_info *vi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) void *buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) for (i = 0; i < vi->max_queue_pairs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) struct virtqueue *vq = vi->sq[i].vq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) if (!is_xdp_frame(buf))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) dev_kfree_skb(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) xdp_return_frame(ptr_to_xdp(buf));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) for (i = 0; i < vi->max_queue_pairs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) struct virtqueue *vq = vi->rq[i].vq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) if (vi->mergeable_rx_bufs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) put_page(virt_to_head_page(buf));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) } else if (vi->big_packets) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) give_pages(&vi->rq[i], buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) put_page(virt_to_head_page(buf));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) static void virtnet_del_vqs(struct virtnet_info *vi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) struct virtio_device *vdev = vi->vdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) virtnet_clean_affinity(vi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) vdev->config->del_vqs(vdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) virtnet_free_queues(vi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) /* How large should a single buffer be so a queue full of these can fit at
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) * least one full packet?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) * Logic below assumes the mergeable buffer header is used.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) static unsigned int mergeable_min_buf_len(struct virtnet_info *vi, struct virtqueue *vq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) const unsigned int hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) unsigned int rq_size = virtqueue_get_vring_size(vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) unsigned int packet_len = vi->big_packets ? IP_MAX_MTU : vi->dev->max_mtu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) unsigned int buf_len = hdr_len + ETH_HLEN + VLAN_HLEN + packet_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) unsigned int min_buf_len = DIV_ROUND_UP(buf_len, rq_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) return max(max(min_buf_len, hdr_len) - hdr_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) (unsigned int)GOOD_PACKET_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) }
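/* Illustrative arithmetic (not part of the original source): with
 * big_packets, packet_len = IP_MAX_MTU = 65535 and hdr_len = 12, so
 * buf_len = 12 + 14 + 4 + 65535 = 65565.  A 16-entry vring gives
 * DIV_ROUND_UP(65565, 16) = 4098, i.e. a 4086-byte minimum buffer;
 * a 256-entry vring yields 257 - 12 = 245, which the max() clamps
 * up to GOOD_PACKET_LEN (1518).
 */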
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) static int virtnet_find_vqs(struct virtnet_info *vi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) vq_callback_t **callbacks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) struct virtqueue **vqs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) int ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) int i, total_vqs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) const char **names;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) bool *ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) /* We expect 1 RX virtqueue followed by 1 TX virtqueue, followed by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) * possibly N-1 more RX/TX queue pairs used in multiqueue mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) * followed by a possible control vq.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) */
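/* For example, with 2 queue pairs plus a control vq the layout is
 * vq0 = rx0, vq1 = tx0, vq2 = rx1, vq3 = tx1, vq4 = ctrl, matching
 * rxq2vq(i) == 2 * i and txq2vq(i) == 2 * i + 1.
 */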
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) total_vqs = vi->max_queue_pairs * 2 +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) /* Allocate space for find_vqs parameters */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) vqs = kcalloc(total_vqs, sizeof(*vqs), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) if (!vqs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) goto err_vq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) callbacks = kmalloc_array(total_vqs, sizeof(*callbacks), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) if (!callbacks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) goto err_callback;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) names = kmalloc_array(total_vqs, sizeof(*names), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) if (!names)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) goto err_names;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) if (!vi->big_packets || vi->mergeable_rx_bufs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) ctx = kcalloc(total_vqs, sizeof(*ctx), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) if (!ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) goto err_ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) ctx = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) }
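/* ctx == true requests per-buffer context pointers on the RX vqs
 * (flagged in the loop below); the receive path uses them to carry
 * buffer metadata such as truesize and headroom in the small-buffer
 * and mergeable-buffer modes.
 */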
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) /* Parameters for control virtqueue, if any */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) if (vi->has_cvq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) callbacks[total_vqs - 1] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) names[total_vqs - 1] = "control";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) /* Allocate/initialize parameters for send/receive virtqueues */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) for (i = 0; i < vi->max_queue_pairs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) callbacks[rxq2vq(i)] = skb_recv_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) callbacks[txq2vq(i)] = skb_xmit_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) sprintf(vi->rq[i].name, "input.%d", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) sprintf(vi->sq[i].name, "output.%d", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) names[rxq2vq(i)] = vi->rq[i].name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) names[txq2vq(i)] = vi->sq[i].name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) if (ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) ctx[rxq2vq(i)] = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) ret = vi->vdev->config->find_vqs(vi->vdev, total_vqs, vqs, callbacks,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) names, ctx, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) goto err_find;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) if (vi->has_cvq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) vi->cvq = vqs[total_vqs - 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) vi->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) for (i = 0; i < vi->max_queue_pairs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) vi->rq[i].vq = vqs[rxq2vq(i)];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) vi->rq[i].min_buf_len = mergeable_min_buf_len(vi, vi->rq[i].vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) vi->sq[i].vq = vqs[txq2vq(i)];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) /* Success: ret == 0 here.  Fall through and free the temporary
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) * find_vqs() parameter arrays, which are needed only during setup.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) err_find:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) kfree(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) err_ctx:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) kfree(names);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) err_names:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) kfree(callbacks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) err_callback:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) kfree(vqs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) err_vq:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) static int virtnet_alloc_queues(struct virtnet_info *vi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) vi->ctrl = kzalloc(sizeof(*vi->ctrl), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) if (!vi->ctrl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) goto err_ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) vi->sq = kcalloc(vi->max_queue_pairs, sizeof(*vi->sq), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) if (!vi->sq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) goto err_sq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) vi->rq = kcalloc(vi->max_queue_pairs, sizeof(*vi->rq), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) if (!vi->rq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) goto err_rq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) INIT_DELAYED_WORK(&vi->refill, refill_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) for (i = 0; i < vi->max_queue_pairs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) vi->rq[i].pages = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) netif_napi_add(vi->dev, &vi->rq[i].napi, virtnet_poll,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) napi_weight);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) netif_tx_napi_add(vi->dev, &vi->sq[i].napi, virtnet_poll_tx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) napi_tx ? napi_weight : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) ewma_pkt_len_init(&vi->rq[i].mrg_avg_pkt_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) u64_stats_init(&vi->rq[i].stats.syncp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) u64_stats_init(&vi->sq[i].stats.syncp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) err_rq:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) kfree(vi->sq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) err_sq:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) kfree(vi->ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) err_ctrl:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) static int init_vqs(struct virtnet_info *vi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) /* Allocate send & receive queues */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) ret = virtnet_alloc_queues(vi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) ret = virtnet_find_vqs(vi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) goto err_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) get_online_cpus();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) virtnet_set_affinity(vi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) put_online_cpus();
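/* get/put_online_cpus() hold off CPU hotplug so the affinity masks
 * computed by virtnet_set_affinity() are consistent when applied.
 */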
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929) err_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) virtnet_free_queues(vi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) #ifdef CONFIG_SYSFS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) static ssize_t mergeable_rx_buffer_size_show(struct netdev_rx_queue *queue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) struct virtnet_info *vi = netdev_priv(queue->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) unsigned int queue_index = get_netdev_rx_queue_index(queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) unsigned int headroom = virtnet_get_headroom(vi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942) unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) struct ewma_pkt_len *avg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) BUG_ON(queue_index >= vi->max_queue_pairs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946) avg = &vi->rq[queue_index].mrg_avg_pkt_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947) return sprintf(buf, "%u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) get_mergeable_buf_len(&vi->rq[queue_index], avg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) SKB_DATA_ALIGN(headroom + tailroom)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) static struct rx_queue_attribute mergeable_rx_buffer_size_attribute =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953) __ATTR_RO(mergeable_rx_buffer_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) static struct attribute *virtio_net_mrg_rx_attrs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) &mergeable_rx_buffer_size_attribute.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) static const struct attribute_group virtio_net_mrg_rx_group = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) .name = "virtio_net",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962) .attrs = virtio_net_mrg_rx_attrs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) };
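/* For mergeable-buffer devices this is expected to appear as, e.g.,
 * /sys/class/net/<dev>/queues/rx-<n>/virtio_net/mergeable_rx_buffer_size
 */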
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) static bool virtnet_fail_on_feature(struct virtio_device *vdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967) unsigned int fbit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) const char *fname, const char *dname)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) if (!virtio_has_feature(vdev, fbit))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) dev_err(&vdev->dev, "device advertises feature %s but not %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) fname, dname);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979) #define VIRTNET_FAIL_ON(vdev, fbit, dbit) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980) virtnet_fail_on_feature(vdev, fbit, #fbit, dbit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982) static bool virtnet_validate_features(struct virtio_device *vdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) if (!virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) (VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_RX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) "VIRTIO_NET_F_CTRL_VQ") ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987) VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_VLAN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988) "VIRTIO_NET_F_CTRL_VQ") ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989) VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990) "VIRTIO_NET_F_CTRL_VQ") ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991) VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_MQ, "VIRTIO_NET_F_CTRL_VQ") ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992) VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993) "VIRTIO_NET_F_CTRL_VQ"))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000) #define MIN_MTU ETH_MIN_MTU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001) #define MAX_MTU ETH_MAX_MTU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003) static int virtnet_validate(struct virtio_device *vdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005) if (!vdev->config->get) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006) dev_err(&vdev->dev, "%s failure: config access disabled\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011) if (!virtnet_validate_features(vdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014) if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015) int mtu = virtio_cread16(vdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016) offsetof(struct virtio_net_config,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017) mtu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) if (mtu < MIN_MTU)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019) __virtio_clear_bit(vdev, VIRTIO_NET_F_MTU);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020) }
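/* Note that clearing the feature bit above, rather than failing
 * validation, makes probe behave as if no MTU had been offered.
 */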
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025) static int virtnet_probe(struct virtio_device *vdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027) int i, err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028) struct net_device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029) struct virtnet_info *vi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030) u16 max_queue_pairs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031) int mtu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033) /* Find out whether the host supports a multiqueue virtio_net device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034) err = virtio_cread_feature(vdev, VIRTIO_NET_F_MQ,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035) struct virtio_net_config,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036) max_virtqueue_pairs, &max_queue_pairs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038) /* We need at least 2 queues (i.e. one RX/TX virtqueue pair) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039) if (err || max_queue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040) max_queue_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041) !virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042) max_queue_pairs = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044) /* Allocate ourselves a network device with room for our info */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045) dev = alloc_etherdev_mq(sizeof(struct virtnet_info), max_queue_pairs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046) if (!dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049) /* Set up network device as normal. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050) dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051) dev->netdev_ops = &virtnet_netdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052) dev->features = NETIF_F_HIGHDMA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054) dev->ethtool_ops = &virtnet_ethtool_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055) SET_NETDEV_DEV(dev, &vdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057) /* Do we support "hardware" checksums? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058) if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059) /* This opens up the world of extra features. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060) dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_SG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061) if (csum)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062) dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064) if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065) dev->hw_features |= NETIF_F_TSO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066) | NETIF_F_TSO_ECN | NETIF_F_TSO6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068) /* Individual feature bits: what can host handle? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069) if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070) dev->hw_features |= NETIF_F_TSO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071) if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072) dev->hw_features |= NETIF_F_TSO6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073) if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074) dev->hw_features |= NETIF_F_TSO_ECN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076) dev->features |= NETIF_F_GSO_ROBUST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078) if (gso)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079) dev->features |= dev->hw_features & NETIF_F_ALL_TSO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080) /* (!csum && gso) case will be fixed by register_netdev() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082) if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_CSUM))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083) dev->features |= NETIF_F_RXCSUM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084) if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085) virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) dev->features |= NETIF_F_GRO_HW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087) if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088) dev->hw_features |= NETIF_F_GRO_HW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090) dev->vlan_features = dev->features;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092) /* MTU range: 68 - 65535 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093) dev->min_mtu = MIN_MTU;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094) dev->max_mtu = MAX_MTU;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096) /* Configuration may specify what MAC to use. Otherwise random. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097) if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098) virtio_cread_bytes(vdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099) offsetof(struct virtio_net_config, mac),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100) dev->dev_addr, dev->addr_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102) eth_hw_addr_random(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104) /* Set up our device-specific information */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105) vi = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106) vi->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107) vi->vdev = vdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108) vdev->priv = vi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110) INIT_WORK(&vi->config_work, virtnet_config_changed_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112) /* If we can receive ANY GSO packets, we must allocate large ones. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113) if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114) virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115) virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116) virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_UFO))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117) vi->big_packets = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119) if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120) vi->mergeable_rx_bufs = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122) if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123) virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124) vi->hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126) vi->hdr_len = sizeof(struct virtio_net_hdr);
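/* Per the virtio 1.0 spec, VERSION_1 devices always use the header
 * layout that includes num_buffers, even without MRG_RXBUF; only
 * legacy devices without mergeable buffers use the shorter header.
 */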
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128) if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129) virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130) vi->any_header_sg = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132) if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3133) vi->has_cvq = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135) if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136) mtu = virtio_cread16(vdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137) offsetof(struct virtio_net_config,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138) mtu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139) if (mtu < dev->min_mtu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140) /* Should never trigger: MTU was previously validated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141) * in virtnet_validate.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143) dev_err(&vdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144) "device MTU appears to have changed: it is now %d < %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145) mtu, dev->min_mtu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147) goto free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150) dev->mtu = mtu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151) dev->max_mtu = mtu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153) /* TODO: size buffers correctly in this case. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154) if (dev->mtu > ETH_DATA_LEN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155) vi->big_packets = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158) if (vi->any_header_sg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159) dev->needed_headroom = vi->hdr_len;
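/* With any_header_sg the virtio-net header may share an sg entry
 * with the packet, so reserving hdr_len of headroom lets the xmit
 * path prepend the header in place instead of using a second buffer.
 */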
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161) /* Enable multiqueue by default */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162) if (num_online_cpus() >= max_queue_pairs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163) vi->curr_queue_pairs = max_queue_pairs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165) vi->curr_queue_pairs = num_online_cpus();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166) vi->max_queue_pairs = max_queue_pairs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168) /* Allocate/initialize the rx/tx queues, and invoke find_vqs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169) err = init_vqs(vi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171) goto free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173) #ifdef CONFIG_SYSFS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174) if (vi->mergeable_rx_bufs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175) dev->sysfs_rx_queue_group = &virtio_net_mrg_rx_group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177) netif_set_real_num_tx_queues(dev, vi->curr_queue_pairs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178) netif_set_real_num_rx_queues(dev, vi->curr_queue_pairs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3180) virtnet_init_settings(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182) if (virtio_has_feature(vdev, VIRTIO_NET_F_STANDBY)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3183) vi->failover = net_failover_create(vi->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184) if (IS_ERR(vi->failover)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185) err = PTR_ERR(vi->failover);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186) goto free_vqs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3187) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3188) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3190) err = register_netdev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3192) pr_debug("virtio_net: registering device failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3193) goto free_failover;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3194) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3196) virtio_device_ready(vdev);
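/* The device is live from this point on, so the control-queue
 * commands issued below (e.g. by virtnet_set_queues()) are permitted.
 */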
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3198) err = virtnet_cpu_notif_add(vi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3199) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3200) pr_debug("virtio_net: registering cpu notifier failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3201) goto free_unregister_netdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3202) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3203)
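	/* Tell the device, over the control virtqueue, how many queue pairs
	 * will actually be used.
	 */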
	virtnet_set_queues(vi, vi->curr_queue_pairs);

	/* Assume link up if device can't report link status,
	 * otherwise get link status from config.
	 */
	netif_carrier_off(dev);
	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
		schedule_work(&vi->config_work);
	} else {
		vi->status = VIRTIO_NET_S_LINK_UP;
		virtnet_update_settings(vi);
		netif_carrier_on(dev);
	}

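	/* Record which receive offloads the device granted, so they can be
	 * toggled later through VIRTIO_NET_F_CTRL_GUEST_OFFLOADS.
	 */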
	for (i = 0; i < ARRAY_SIZE(guest_offloads); i++)
		if (virtio_has_feature(vi->vdev, guest_offloads[i]))
			set_bit(guest_offloads[i], &vi->guest_offloads);
	vi->guest_offloads_capable = vi->guest_offloads;

	pr_debug("virtnet: registered device %s with %d RX and TX vq's\n",
		 dev->name, max_queue_pairs);

	return 0;

free_unregister_netdev:
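	/* Reset first so the device stops using its buffers before the
	 * netdev is torn down.
	 */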
	vi->vdev->config->reset(vdev);

	unregister_netdev(dev);
free_failover:
	net_failover_destroy(vi->failover);
free_vqs:
	cancel_delayed_work_sync(&vi->refill);
	free_receive_page_frags(vi);
	virtnet_del_vqs(vi);
free:
	free_netdev(dev);
	return err;
}

static void remove_vq_common(struct virtnet_info *vi)
{
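	/* Reset the device so it no longer touches any posted buffers. */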
	vi->vdev->config->reset(vi->vdev);

	/* Free unused buffers in both send and recv, if any. */
	free_unused_bufs(vi);

	free_receive_bufs(vi);

	free_receive_page_frags(vi);

	virtnet_del_vqs(vi);
}

static void virtnet_remove(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;

	virtnet_cpu_notif_remove(vi);

	/* Make sure no work handler is accessing the device. */
	flush_work(&vi->config_work);

	unregister_netdev(vi->dev);

	net_failover_destroy(vi->failover);

	remove_vq_common(vi);

	free_netdev(vi->dev);
}

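/* PM hooks: freeze tears the virtqueues down completely; restore rebuilds
 * them from scratch through virtnet_restore_up().
 */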
static __maybe_unused int virtnet_freeze(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;

	virtnet_cpu_notif_remove(vi);
	virtnet_freeze_down(vdev);
	remove_vq_common(vi);

	return 0;
}

static __maybe_unused int virtnet_restore(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;
	int err;

	err = virtnet_restore_up(vdev);
	if (err)
		return err;
	virtnet_set_queues(vi, vi->curr_queue_pairs);

	err = virtnet_cpu_notif_add(vi);
	if (err) {
		virtnet_freeze_down(vdev);
		remove_vq_common(vi);
		return err;
	}

	return 0;
}

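/* Bind to any vendor's virtio network device. */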
static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

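/* Feature bits the driver understands; only bits listed here can end up
 * negotiated, anything else the device offers stays unacknowledged.
 */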
#define VIRTNET_FEATURES \
	VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, \
	VIRTIO_NET_F_MAC, \
	VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, \
	VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, \
	VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO, \
	VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ, \
	VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, \
	VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, \
	VIRTIO_NET_F_CTRL_MAC_ADDR, \
	VIRTIO_NET_F_MTU, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, \
	VIRTIO_NET_F_SPEED_DUPLEX, VIRTIO_NET_F_STANDBY

static unsigned int features[] = {
	VIRTNET_FEATURES,
};

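/* Legacy (pre-virtio-1.0) devices may additionally offer the deprecated
 * VIRTIO_NET_F_GSO bit and VIRTIO_F_ANY_LAYOUT.
 */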
static unsigned int features_legacy[] = {
	VIRTNET_FEATURES,
	VIRTIO_NET_F_GSO,
	VIRTIO_F_ANY_LAYOUT,
};

static struct virtio_driver virtio_net_driver = {
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.feature_table_legacy = features_legacy,
	.feature_table_size_legacy = ARRAY_SIZE(features_legacy),
	.driver.name = KBUILD_MODNAME,
	.driver.owner = THIS_MODULE,
	.id_table = id_table,
	.validate = virtnet_validate,
	.probe = virtnet_probe,
	.remove = virtnet_remove,
	.config_changed = virtnet_config_changed,
#ifdef CONFIG_PM_SLEEP
	.freeze = virtnet_freeze,
	.restore = virtnet_restore,
#endif
};

static __init int virtio_net_driver_init(void)
{
	int ret;

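	/* CPUHP_AP_ONLINE_DYN allocates a dynamic hotplug state; keep the
	 * returned state id in virtionet_online so devices can register
	 * instances against it later.
	 */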
	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "virtio/net:online",
				      virtnet_cpu_online,
				      virtnet_cpu_down_prep);
	if (ret < 0)
		goto out;
	virtionet_online = ret;
	ret = cpuhp_setup_state_multi(CPUHP_VIRT_NET_DEAD, "virtio/net:dead",
				      NULL, virtnet_cpu_dead);
	if (ret)
		goto err_dead;

	ret = register_virtio_driver(&virtio_net_driver);
	if (ret)
		goto err_virtio;
	return 0;
err_virtio:
	cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD);
err_dead:
	cpuhp_remove_multi_state(virtionet_online);
out:
	return ret;
}
module_init(virtio_net_driver_init);

static __exit void virtio_net_driver_exit(void)
{
	unregister_virtio_driver(&virtio_net_driver);
	cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD);
	cpuhp_remove_multi_state(virtionet_online);
}
module_exit(virtio_net_driver_exit);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio network driver");
MODULE_LICENSE("GPL");