// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/rtnetlink.h>
#include <linux/netpoll.h>
#include <linux/bpf.h>

#include <net/arp.h>
#include <net/route.h>
#include <net/sock.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>

#include "hyperv_net.h"

#define RING_SIZE_MIN	64
#define RETRY_US_LO	5000
#define RETRY_US_HI	10000
#define RETRY_MAX	2000	/* >10 sec */
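/*
 * Retry budget worked out from the values above: netvsc_wait_until_empty()
 * sleeps 5-10 ms per iteration (RETRY_US_LO..RETRY_US_HI) for up to
 * RETRY_MAX iterations, i.e. roughly 10-20 seconds before giving up.
 */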

#define LINKCHANGE_INT (2 * HZ)
#define VF_TAKEOVER_INT (HZ / 10)

static unsigned int ring_size __ro_after_init = 128;
module_param(ring_size, uint, 0444);
MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");
unsigned int netvsc_ring_bytes __ro_after_init;

static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
				NETIF_MSG_LINK | NETIF_MSG_IFUP |
				NETIF_MSG_IFDOWN | NETIF_MSG_RX_ERR |
				NETIF_MSG_TX_ERR;

static int debug = -1;
module_param(debug, int, 0444);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

static LIST_HEAD(netvsc_dev_list);

static void netvsc_change_rx_flags(struct net_device *net, int change)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	struct net_device *vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
	int inc;

	if (!vf_netdev)
		return;

	if (change & IFF_PROMISC) {
		inc = (net->flags & IFF_PROMISC) ? 1 : -1;
		dev_set_promiscuity(vf_netdev, inc);
	}

	if (change & IFF_ALLMULTI) {
		inc = (net->flags & IFF_ALLMULTI) ? 1 : -1;
		dev_set_allmulti(vf_netdev, inc);
	}
}

static void netvsc_set_rx_mode(struct net_device *net)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	struct net_device *vf_netdev;
	struct netvsc_device *nvdev;

	rcu_read_lock();
	vf_netdev = rcu_dereference(ndev_ctx->vf_netdev);
	if (vf_netdev) {
		dev_uc_sync(vf_netdev, net);
		dev_mc_sync(vf_netdev, net);
	}

	nvdev = rcu_dereference(ndev_ctx->nvdev);
	if (nvdev)
		rndis_filter_update(nvdev);
	rcu_read_unlock();
}

static void netvsc_tx_enable(struct netvsc_device *nvscdev,
			     struct net_device *ndev)
{
	nvscdev->tx_disable = false;
	virt_wmb(); /* ensure queue wake up mechanism is on */

	netif_tx_wake_all_queues(ndev);
}
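
/*
 * Note on ordering: netvsc_tx_enable() and netvsc_tx_disable() flip
 * tx_disable and then issue virt_wmb() so the new value is visible before
 * the queues are woken or stopped; this is intended to pair with the
 * tx_disable checks on the send and queue-wake paths (in netvsc.c, outside
 * this file).
 */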

static int netvsc_open(struct net_device *net)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	struct net_device *vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
	struct netvsc_device *nvdev = rtnl_dereference(ndev_ctx->nvdev);
	struct rndis_device *rdev;
	int ret = 0;

	netif_carrier_off(net);

	/* Open up the device */
	ret = rndis_filter_open(nvdev);
	if (ret != 0) {
		netdev_err(net, "unable to open device (ret %d).\n", ret);
		return ret;
	}

	rdev = nvdev->extension;
	if (!rdev->link_state) {
		netif_carrier_on(net);
		netvsc_tx_enable(nvdev, net);
	}

	if (vf_netdev) {
		/* Setting the synthetic device up transparently sets
		 * the slave as up. If open fails, then the slave will
		 * still be offline (and not used).
		 */
		ret = dev_open(vf_netdev, NULL);
		if (ret)
			netdev_warn(net,
				    "unable to open slave: %s: %d\n",
				    vf_netdev->name, ret);
	}
	return 0;
}

static int netvsc_wait_until_empty(struct netvsc_device *nvdev)
{
	unsigned int retry = 0;
	int i;

	/* Ensure pending bytes in ring are read */
	for (;;) {
		u32 aread = 0;

		for (i = 0; i < nvdev->num_chn; i++) {
			struct vmbus_channel *chn
				= nvdev->chan_table[i].channel;

			if (!chn)
				continue;

			/* make sure receive not running now */
			napi_synchronize(&nvdev->chan_table[i].napi);

			aread = hv_get_bytes_to_read(&chn->inbound);
			if (aread)
				break;

			aread = hv_get_bytes_to_read(&chn->outbound);
			if (aread)
				break;
		}

		if (aread == 0)
			return 0;

		if (++retry > RETRY_MAX)
			return -ETIMEDOUT;

		usleep_range(RETRY_US_LO, RETRY_US_HI);
	}
}

static void netvsc_tx_disable(struct netvsc_device *nvscdev,
			      struct net_device *ndev)
{
	if (nvscdev) {
		nvscdev->tx_disable = true;
		virt_wmb(); /* ensure txq will not wake up after stop */
	}

	netif_tx_disable(ndev);
}

static int netvsc_close(struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct net_device *vf_netdev
		= rtnl_dereference(net_device_ctx->vf_netdev);
	struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
	int ret;

	netvsc_tx_disable(nvdev, net);

	/* No need to close the rndis filter if it has already been removed */
	if (!nvdev)
		return 0;

	ret = rndis_filter_close(nvdev);
	if (ret != 0) {
		netdev_err(net, "unable to close device (ret %d).\n", ret);
		return ret;
	}

	ret = netvsc_wait_until_empty(nvdev);
	if (ret)
		netdev_err(net, "Ring buffer not empty after closing rndis\n");

	if (vf_netdev)
		dev_close(vf_netdev);

	return ret;
}

static inline void *init_ppi_data(struct rndis_message *msg,
				  u32 ppi_size, u32 pkt_type)
{
	struct rndis_packet *rndis_pkt = &msg->msg.pkt;
	struct rndis_per_packet_info *ppi;

	rndis_pkt->data_offset += ppi_size;
	ppi = (void *)rndis_pkt + rndis_pkt->per_pkt_info_offset
		+ rndis_pkt->per_pkt_info_len;

	ppi->size = ppi_size;
	ppi->type = pkt_type;
	ppi->internal = 0;
	ppi->ppi_offset = sizeof(struct rndis_per_packet_info);

	rndis_pkt->per_pkt_info_len += ppi_size;

	return ppi + 1;
}
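
/*
 * Illustrative layout after one or more init_ppi_data() calls (derived from
 * the offsets maintained above): each PPI (header + payload) is appended at
 * per_pkt_info_offset + per_pkt_info_len, both per_pkt_info_len and
 * data_offset grow by ppi_size, and the returned pointer (ppi + 1) is where
 * the caller writes the payload itself, e.g. the hash value or 802.1Q info.
 *
 *   | struct rndis_packet | PPI #1 hdr | PPI #1 data | PPI #2 hdr | ... |
 */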

/* Azure hosts don't support non-TCP port numbers in hashing for fragmented
 * packets. We can use ethtool to change UDP hash level when necessary.
 */
static inline u32 netvsc_get_hash(
	struct sk_buff *skb,
	const struct net_device_context *ndc)
{
	struct flow_keys flow;
	u32 hash, pkt_proto = 0;
	static u32 hashrnd __read_mostly;

	net_get_random_once(&hashrnd, sizeof(hashrnd));

	if (!skb_flow_dissect_flow_keys(skb, &flow, 0))
		return 0;

	switch (flow.basic.ip_proto) {
	case IPPROTO_TCP:
		if (flow.basic.n_proto == htons(ETH_P_IP))
			pkt_proto = HV_TCP4_L4HASH;
		else if (flow.basic.n_proto == htons(ETH_P_IPV6))
			pkt_proto = HV_TCP6_L4HASH;

		break;

	case IPPROTO_UDP:
		if (flow.basic.n_proto == htons(ETH_P_IP))
			pkt_proto = HV_UDP4_L4HASH;
		else if (flow.basic.n_proto == htons(ETH_P_IPV6))
			pkt_proto = HV_UDP6_L4HASH;

		break;
	}

	if (pkt_proto & ndc->l4_hash) {
		return skb_get_hash(skb);
	} else {
		if (flow.basic.n_proto == htons(ETH_P_IP))
			hash = jhash2((u32 *)&flow.addrs.v4addrs, 2, hashrnd);
		else if (flow.basic.n_proto == htons(ETH_P_IPV6))
			hash = jhash2((u32 *)&flow.addrs.v6addrs, 8, hashrnd);
		else
			return 0;

		__skb_set_sw_hash(skb, hash, false);
	}

	return hash;
}
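
/*
 * Summary of the policy above: when the L4 hash is enabled for the packet's
 * protocol (ndc->l4_hash bit set), the full flow hash from skb_get_hash()
 * is used; otherwise the hash falls back to a jhash over the IP addresses
 * only (2 words for IPv4, 8 for IPv6), which mirrors the host's
 * address-only hashing for such packets (see the comment above
 * netvsc_get_hash()).
 */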

static inline int netvsc_get_tx_queue(struct net_device *ndev,
				      struct sk_buff *skb, int old_idx)
{
	const struct net_device_context *ndc = netdev_priv(ndev);
	struct sock *sk = skb->sk;
	int q_idx;

	q_idx = ndc->tx_table[netvsc_get_hash(skb, ndc) &
			      (VRSS_SEND_TAB_SIZE - 1)];

	/* If queue index changed record the new value */
	if (q_idx != old_idx &&
	    sk && sk_fullsock(sk) && rcu_access_pointer(sk->sk_dst_cache))
		sk_tx_queue_set(sk, q_idx);

	return q_idx;
}

/*
 * Select queue for transmit.
 *
 * If a valid queue has already been assigned, then use that.
 * Otherwise compute tx queue based on hash and the send table.
 *
 * This is basically similar to default (netdev_pick_tx) with the added step
 * of using the host send_table when no other queue has been assigned.
 *
 * TODO support XPS - but get_xps_queue not exported
 */
static u16 netvsc_pick_tx(struct net_device *ndev, struct sk_buff *skb)
{
	int q_idx = sk_tx_queue_get(skb->sk);

	if (q_idx < 0 || skb->ooo_okay || q_idx >= ndev->real_num_tx_queues) {
		/* If forwarding a packet, we use the recorded queue when
		 * available for better cache locality.
		 */
		if (skb_rx_queue_recorded(skb))
			q_idx = skb_get_rx_queue(skb);
		else
			q_idx = netvsc_get_tx_queue(ndev, skb, q_idx);
	}

	return q_idx;
}

static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
			       struct net_device *sb_dev)
{
	struct net_device_context *ndc = netdev_priv(ndev);
	struct net_device *vf_netdev;
	u16 txq;

	rcu_read_lock();
	vf_netdev = rcu_dereference(ndc->vf_netdev);
	if (vf_netdev) {
		const struct net_device_ops *vf_ops = vf_netdev->netdev_ops;

		if (vf_ops->ndo_select_queue)
			txq = vf_ops->ndo_select_queue(vf_netdev, skb, sb_dev);
		else
			txq = netdev_pick_tx(vf_netdev, skb, NULL);

		/* Record the queue selected by the VF so that it can be
		 * used for the common case where the VF has more queues
		 * than the synthetic device.
		 */
		qdisc_skb_cb(skb)->slave_dev_queue_mapping = txq;
	} else {
		txq = netvsc_pick_tx(ndev, skb);
	}
	rcu_read_unlock();

	while (txq >= ndev->real_num_tx_queues)
		txq -= ndev->real_num_tx_queues;

	return txq;
}
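
/*
 * The subtraction loop at the end of netvsc_select_queue() folds a queue
 * number chosen on the VF (which may have more queues) back into the
 * synthetic device's 0..real_num_tx_queues-1 range; it is equivalent to a
 * modulo, written as repeated subtraction since txq is normally already in
 * range or only slightly above it.
 */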

static u32 fill_pg_buf(unsigned long hvpfn, u32 offset, u32 len,
		       struct hv_page_buffer *pb)
{
	int j = 0;

	hvpfn += offset >> HV_HYP_PAGE_SHIFT;
	offset = offset & ~HV_HYP_PAGE_MASK;

	while (len > 0) {
		unsigned long bytes;

		bytes = HV_HYP_PAGE_SIZE - offset;
		if (bytes > len)
			bytes = len;
		pb[j].pfn = hvpfn;
		pb[j].offset = offset;
		pb[j].len = bytes;

		offset += bytes;
		len -= bytes;

		if (offset == HV_HYP_PAGE_SIZE && len) {
			hvpfn++;
			offset = 0;
			j++;
		}
	}

	return j + 1;
}
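
/*
 * Worked example (assuming the usual 4 KiB HV_HYP_PAGE_SIZE): a 9000-byte
 * buffer starting at offset 2048 within its first page is split into three
 * page-buffer entries of 2048, 4096 and 2856 bytes on consecutive
 * hypervisor page frames, and fill_pg_buf() returns 3.
 */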

static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb,
			   struct hv_netvsc_packet *packet,
			   struct hv_page_buffer *pb)
{
	u32 slots_used = 0;
	char *data = skb->data;
	int frags = skb_shinfo(skb)->nr_frags;
	int i;

	/* The packet is laid out thus:
	 * 1. hdr: RNDIS header and PPI
	 * 2. skb linear data
	 * 3. skb fragment data
	 */
	slots_used += fill_pg_buf(virt_to_hvpfn(hdr),
				  offset_in_hvpage(hdr),
				  len,
				  &pb[slots_used]);

	packet->rmsg_size = len;
	packet->rmsg_pgcnt = slots_used;

	slots_used += fill_pg_buf(virt_to_hvpfn(data),
				  offset_in_hvpage(data),
				  skb_headlen(skb),
				  &pb[slots_used]);

	for (i = 0; i < frags; i++) {
		skb_frag_t *frag = skb_shinfo(skb)->frags + i;

		slots_used += fill_pg_buf(page_to_hvpfn(skb_frag_page(frag)),
					  skb_frag_off(frag),
					  skb_frag_size(frag),
					  &pb[slots_used]);
	}
	return slots_used;
}

static int count_skb_frag_slots(struct sk_buff *skb)
{
	int i, frags = skb_shinfo(skb)->nr_frags;
	int pages = 0;

	for (i = 0; i < frags; i++) {
		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
		unsigned long size = skb_frag_size(frag);
		unsigned long offset = skb_frag_off(frag);

		/* Skip counting fully unused pages at the start of the fragment */
		offset &= ~HV_HYP_PAGE_MASK;
		pages += HVPFN_UP(offset + size);
	}
	return pages;
}

static int netvsc_get_slots(struct sk_buff *skb)
{
	char *data = skb->data;
	unsigned int offset = offset_in_hvpage(data);
	unsigned int len = skb_headlen(skb);
	int slots;
	int frag_slots;

	slots = DIV_ROUND_UP(offset + len, HV_HYP_PAGE_SIZE);
	frag_slots = count_skb_frag_slots(skb);
	return slots + frag_slots;
}
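
/*
 * netvsc_get_slots() returns an upper-bound estimate, in hypervisor-page
 * sized units, of how many hv_page_buffer entries the linear data plus
 * fragments can need; the transmit path below adds 2 more for the RNDIS
 * header and compares against MAX_PAGE_BUFFER_COUNT to decide whether the
 * skb must be linearized.
 */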

static u32 net_checksum_info(struct sk_buff *skb)
{
	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *ip = ip_hdr(skb);

		if (ip->protocol == IPPROTO_TCP)
			return TRANSPORT_INFO_IPV4_TCP;
		else if (ip->protocol == IPPROTO_UDP)
			return TRANSPORT_INFO_IPV4_UDP;
	} else {
		struct ipv6hdr *ip6 = ipv6_hdr(skb);

		if (ip6->nexthdr == IPPROTO_TCP)
			return TRANSPORT_INFO_IPV6_TCP;
		else if (ip6->nexthdr == IPPROTO_UDP)
			return TRANSPORT_INFO_IPV6_UDP;
	}

	return TRANSPORT_INFO_NOT_IP;
}

/* Send skb on the slave VF device. */
static int netvsc_vf_xmit(struct net_device *net, struct net_device *vf_netdev,
			  struct sk_buff *skb)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	unsigned int len = skb->len;
	int rc;

	skb->dev = vf_netdev;
	skb_record_rx_queue(skb, qdisc_skb_cb(skb)->slave_dev_queue_mapping);

	rc = dev_queue_xmit(skb);
	if (likely(rc == NET_XMIT_SUCCESS || rc == NET_XMIT_CN)) {
		struct netvsc_vf_pcpu_stats *pcpu_stats
			= this_cpu_ptr(ndev_ctx->vf_stats);

		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(ndev_ctx->vf_stats->tx_dropped);
	}

	return rc;
}
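
/*
 * The per-CPU VF counters above are updated inside a u64_stats syncp
 * section so that readers (the stats aggregation path elsewhere in this
 * driver) see consistent 64-bit values even on 32-bit kernels, without
 * taking a lock on the hot transmit path.
 */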

static int netvsc_xmit(struct sk_buff *skb, struct net_device *net, bool xdp_tx)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct hv_netvsc_packet *packet = NULL;
	int ret;
	unsigned int num_data_pgs;
	struct rndis_message *rndis_msg;
	struct net_device *vf_netdev;
	u32 rndis_msg_size;
	u32 hash;
	struct hv_page_buffer pb[MAX_PAGE_BUFFER_COUNT];

	/* If the VF is present and up then redirect packets to it.
	 * Skip the VF if it is marked down or has no carrier.
	 * If netpoll is in use, then the VF cannot be used either.
	 */
	vf_netdev = rcu_dereference_bh(net_device_ctx->vf_netdev);
	if (vf_netdev && netif_running(vf_netdev) &&
	    netif_carrier_ok(vf_netdev) && !netpoll_tx_running(net))
		return netvsc_vf_xmit(net, vf_netdev, skb);

	/* We will need at most two pages to describe the RNDIS
	 * header. We can only transmit MAX_PAGE_BUFFER_COUNT number
	 * of pages in a single packet. If the skb is scattered across
	 * more pages, we try linearizing it.
	 */

	num_data_pgs = netvsc_get_slots(skb) + 2;

	if (unlikely(num_data_pgs > MAX_PAGE_BUFFER_COUNT)) {
		++net_device_ctx->eth_stats.tx_scattered;

		if (skb_linearize(skb))
			goto no_memory;

		num_data_pgs = netvsc_get_slots(skb) + 2;
		if (num_data_pgs > MAX_PAGE_BUFFER_COUNT) {
			++net_device_ctx->eth_stats.tx_too_big;
			goto drop;
		}
	}

	/*
	 * Place the rndis header in the skb headroom; the
	 * skb->cb area will be used for the hv_netvsc_packet
	 * structure.
	 */
	ret = skb_cow_head(skb, RNDIS_AND_PPI_SIZE);
	if (ret)
		goto no_memory;

	/* Use the skb control buffer for building up the packet */
	BUILD_BUG_ON(sizeof(struct hv_netvsc_packet) >
		     sizeof_field(struct sk_buff, cb));
	packet = (struct hv_netvsc_packet *)skb->cb;

	packet->q_idx = skb_get_queue_mapping(skb);

	packet->total_data_buflen = skb->len;
	packet->total_bytes = skb->len;
	packet->total_packets = 1;

	rndis_msg = (struct rndis_message *)skb->head;

	/* Add the rndis header */
	rndis_msg->ndis_msg_type = RNDIS_MSG_PACKET;
	rndis_msg->msg_len = packet->total_data_buflen;

	rndis_msg->msg.pkt = (struct rndis_packet) {
		.data_offset = sizeof(struct rndis_packet),
		.data_len = packet->total_data_buflen,
		.per_pkt_info_offset = sizeof(struct rndis_packet),
	};

	rndis_msg_size = RNDIS_MESSAGE_SIZE(struct rndis_packet);

	hash = skb_get_hash_raw(skb);
	if (hash != 0 && net->real_num_tx_queues > 1) {
		u32 *hash_info;

		rndis_msg_size += NDIS_HASH_PPI_SIZE;
		hash_info = init_ppi_data(rndis_msg, NDIS_HASH_PPI_SIZE,
					  NBL_HASH_VALUE);
		*hash_info = hash;
	}

	/* When using AF_PACKET we need to drop the VLAN header from
	 * the frame and update the SKB so that the host OS can
	 * transmit the 802.1Q packet.
	 */
	if (skb->protocol == htons(ETH_P_8021Q)) {
		u16 vlan_tci;

		skb_reset_mac_header(skb);
		if (eth_type_vlan(eth_hdr(skb)->h_proto)) {
			if (unlikely(__skb_vlan_pop(skb, &vlan_tci) != 0)) {
				++net_device_ctx->eth_stats.vlan_error;
				goto drop;
			}

			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
			/* Update the NDIS header pkt lengths */
			packet->total_data_buflen -= VLAN_HLEN;
			packet->total_bytes -= VLAN_HLEN;
			rndis_msg->msg_len = packet->total_data_buflen;
			rndis_msg->msg.pkt.data_len = packet->total_data_buflen;
		}
	}

	if (skb_vlan_tag_present(skb)) {
		struct ndis_pkt_8021q_info *vlan;

		rndis_msg_size += NDIS_VLAN_PPI_SIZE;
		vlan = init_ppi_data(rndis_msg, NDIS_VLAN_PPI_SIZE,
				     IEEE_8021Q_INFO);

		vlan->value = 0;
		vlan->vlanid = skb_vlan_tag_get_id(skb);
		vlan->cfi = skb_vlan_tag_get_cfi(skb);
		vlan->pri = skb_vlan_tag_get_prio(skb);
	}

	if (skb_is_gso(skb)) {
		struct ndis_tcp_lso_info *lso_info;

		rndis_msg_size += NDIS_LSO_PPI_SIZE;
		lso_info = init_ppi_data(rndis_msg, NDIS_LSO_PPI_SIZE,
					 TCP_LARGESEND_PKTINFO);

		lso_info->value = 0;
		lso_info->lso_v2_transmit.type = NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE;
		if (skb->protocol == htons(ETH_P_IP)) {
			lso_info->lso_v2_transmit.ip_version =
				NDIS_TCP_LARGE_SEND_OFFLOAD_IPV4;
			ip_hdr(skb)->tot_len = 0;
			ip_hdr(skb)->check = 0;
			tcp_hdr(skb)->check =
				~csum_tcpudp_magic(ip_hdr(skb)->saddr,
						   ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
		} else {
			lso_info->lso_v2_transmit.ip_version =
				NDIS_TCP_LARGE_SEND_OFFLOAD_IPV6;
			tcp_v6_gso_csum_prep(skb);
		}
		lso_info->lso_v2_transmit.tcp_header_offset = skb_transport_offset(skb);
		lso_info->lso_v2_transmit.mss = skb_shinfo(skb)->gso_size;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (net_checksum_info(skb) & net_device_ctx->tx_checksum_mask) {
			struct ndis_tcp_ip_checksum_info *csum_info;

			rndis_msg_size += NDIS_CSUM_PPI_SIZE;
			csum_info = init_ppi_data(rndis_msg, NDIS_CSUM_PPI_SIZE,
						  TCPIP_CHKSUM_PKTINFO);

			csum_info->value = 0;
			csum_info->transmit.tcp_header_offset = skb_transport_offset(skb);

			if (skb->protocol == htons(ETH_P_IP)) {
				csum_info->transmit.is_ipv4 = 1;

				if (ip_hdr(skb)->protocol == IPPROTO_TCP)
					csum_info->transmit.tcp_checksum = 1;
				else
					csum_info->transmit.udp_checksum = 1;
			} else {
				csum_info->transmit.is_ipv6 = 1;

				if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
					csum_info->transmit.tcp_checksum = 1;
				else
					csum_info->transmit.udp_checksum = 1;
			}
		} else {
			/* Can't do offload of this type of checksum */
			if (skb_checksum_help(skb))
				goto drop;
		}
	}

	/* Start filling in the page buffers with the rndis hdr */
	rndis_msg->msg_len += rndis_msg_size;
	packet->total_data_buflen = rndis_msg->msg_len;
	packet->page_buf_cnt = init_page_array(rndis_msg, rndis_msg_size,
					       skb, packet, pb);

	/* timestamp packet in software */
	skb_tx_timestamp(skb);

	ret = netvsc_send(net, packet, rndis_msg, pb, skb, xdp_tx);
	if (likely(ret == 0))
		return NETDEV_TX_OK;

	if (ret == -EAGAIN) {
		++net_device_ctx->eth_stats.tx_busy;
		return NETDEV_TX_BUSY;
	}

	if (ret == -ENOSPC)
		++net_device_ctx->eth_stats.tx_no_space;

drop:
	dev_kfree_skb_any(skb);
	net->stats.tx_dropped++;

	return NETDEV_TX_OK;

no_memory:
	++net_device_ctx->eth_stats.tx_no_memory;
	goto drop;
}

static netdev_tx_t netvsc_start_xmit(struct sk_buff *skb,
				     struct net_device *ndev)
{
	return netvsc_xmit(skb, ndev, false);
}

/*
 * netvsc_linkstatus_callback - Link up/down notification
 */
void netvsc_linkstatus_callback(struct net_device *net,
				struct rndis_message *resp)
{
	struct rndis_indicate_status *indicate = &resp->msg.indicate_status;
	struct net_device_context *ndev_ctx = netdev_priv(net);
	struct netvsc_reconfig *event;
	unsigned long flags;

	/* Ensure the packet is big enough to access its fields */
	if (resp->msg_len - RNDIS_HEADER_SIZE < sizeof(struct rndis_indicate_status)) {
		netdev_err(net, "invalid rndis_indicate_status packet, len: %u\n",
			   resp->msg_len);
		return;
	}

	/* Update the physical link speed when changing to another vSwitch */
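	/* (The RNDIS status buffer carries the speed in units of 100 bps,
	 * so the division by 10000 below yields Mbps - assuming the
	 * standard NDIS link-speed encoding.)
	 */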
	if (indicate->status == RNDIS_STATUS_LINK_SPEED_CHANGE) {
		u32 speed;

		speed = *(u32 *)((void *)indicate
				 + indicate->status_buf_offset) / 10000;
		ndev_ctx->speed = speed;
		return;
	}

	/* Handle these link change statuses below */
	if (indicate->status != RNDIS_STATUS_NETWORK_CHANGE &&
	    indicate->status != RNDIS_STATUS_MEDIA_CONNECT &&
	    indicate->status != RNDIS_STATUS_MEDIA_DISCONNECT)
		return;

	if (net->reg_state != NETREG_REGISTERED)
		return;

	event = kzalloc(sizeof(*event), GFP_ATOMIC);
	if (!event)
		return;
	event->event = indicate->status;

	spin_lock_irqsave(&ndev_ctx->lock, flags);
	list_add_tail(&event->list, &ndev_ctx->reconfig_events);
	spin_unlock_irqrestore(&ndev_ctx->lock, flags);

	schedule_delayed_work(&ndev_ctx->dwork, 0);
}

static void netvsc_xdp_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	int rc;

	skb->queue_mapping = skb_get_rx_queue(skb);
	__skb_push(skb, ETH_HLEN);

	rc = netvsc_xmit(skb, ndev, true);

	if (dev_xmit_complete(rc))
		return;

	dev_kfree_skb_any(skb);
	ndev->stats.tx_dropped++;
}

static void netvsc_comp_ipcsum(struct sk_buff *skb)
{
	struct iphdr *iph = (struct iphdr *)skb->data;

	iph->check = 0;
	iph->check = ip_fast_csum(iph, iph->ihl);
}
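
/*
 * netvsc_comp_ipcsum() recomputes the IPv4 header checksum in place; it is
 * used on the receive path below when the host indicates that the header
 * checksum value is invalid (e.g. after receive coalescing), since Linux
 * always validates the IPv4 header checksum.
 */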

static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
					     struct netvsc_channel *nvchan,
					     struct xdp_buff *xdp)
{
	struct napi_struct *napi = &nvchan->napi;
	const struct ndis_pkt_8021q_info *vlan = nvchan->rsc.vlan;
	const struct ndis_tcp_ip_checksum_info *csum_info =
		nvchan->rsc.csum_info;
	const u32 *hash_info = nvchan->rsc.hash_info;
	struct sk_buff *skb;
	void *xbuf = xdp->data_hard_start;
	int i;

	if (xbuf) {
		unsigned int hdroom = xdp->data - xdp->data_hard_start;
		unsigned int xlen = xdp->data_end - xdp->data;
		unsigned int frag_size = xdp->frame_sz;

		skb = build_skb(xbuf, frag_size);

		if (!skb) {
			__free_page(virt_to_page(xbuf));
			return NULL;
		}

		skb_reserve(skb, hdroom);
		skb_put(skb, xlen);
		skb->dev = napi->dev;
	} else {
		skb = napi_alloc_skb(napi, nvchan->rsc.pktlen);

		if (!skb)
			return NULL;

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) /* Copy to skb. This copy is needed here since the memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) * pointed to by hv_netvsc_packet cannot be deallocated.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) for (i = 0; i < nvchan->rsc.cnt; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) skb_put_data(skb, nvchan->rsc.data[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) nvchan->rsc.len[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) skb->protocol = eth_type_trans(skb, net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) /* skb is already created with CHECKSUM_NONE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) skb_checksum_none_assert(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) /* Incoming packets may have IP header checksum verified by the host.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) * They may not have IP header checksum computed after coalescing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) * We compute it here if the flags are set, because on Linux, the IP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) * checksum is always checked.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) if (csum_info && csum_info->receive.ip_checksum_value_invalid &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) csum_info->receive.ip_checksum_succeeded &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) skb->protocol == htons(ETH_P_IP))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) netvsc_comp_ipcsum(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) /* Do L4 checksum offload if enabled and present. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) if (csum_info && (net->features & NETIF_F_RXCSUM)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) if (csum_info->receive.tcp_checksum_succeeded ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) csum_info->receive.udp_checksum_succeeded)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) skb->ip_summed = CHECKSUM_UNNECESSARY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) if (hash_info && (net->features & NETIF_F_RXHASH))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) skb_set_hash(skb, *hash_info, PKT_HASH_TYPE_L4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) if (vlan) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) u16 vlan_tci = vlan->vlanid | (vlan->pri << VLAN_PRIO_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) (vlan->cfi ? VLAN_CFI_MASK : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) vlan_tci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) return skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) * netvsc_recv_callback - Callback when we receive a packet from the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) * "wire" on the specified device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) int netvsc_recv_callback(struct net_device *net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) struct netvsc_device *net_device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) struct netvsc_channel *nvchan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) struct net_device_context *net_device_ctx = netdev_priv(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) struct vmbus_channel *channel = nvchan->channel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) u16 q_idx = channel->offermsg.offer.sub_channel_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) struct netvsc_stats *rx_stats = &nvchan->rx_stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) struct xdp_buff xdp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) u32 act;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) if (net->reg_state != NETREG_REGISTERED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) return NVSP_STAT_FAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) act = netvsc_run_xdp(net, nvchan, &xdp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) if (act != XDP_PASS && act != XDP_TX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) u64_stats_update_begin(&rx_stats->syncp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) rx_stats->xdp_drop++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) u64_stats_update_end(&rx_stats->syncp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) return NVSP_STAT_SUCCESS; /* consumed by XDP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) /* Allocate a skb - TODO direct I/O to pages? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) skb = netvsc_alloc_recv_skb(net, nvchan, &xdp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) if (unlikely(!skb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) ++net_device_ctx->eth_stats.rx_no_memory;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) return NVSP_STAT_FAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) skb_record_rx_queue(skb, q_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) * Even if injecting the packet, record the statistics
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) * on the synthetic device because modifying the VF device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) * statistics will not work correctly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) u64_stats_update_begin(&rx_stats->syncp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) rx_stats->packets++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) rx_stats->bytes += nvchan->rsc.pktlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) if (skb->pkt_type == PACKET_BROADCAST)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) ++rx_stats->broadcast;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) else if (skb->pkt_type == PACKET_MULTICAST)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) ++rx_stats->multicast;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) u64_stats_update_end(&rx_stats->syncp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) if (act == XDP_TX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) netvsc_xdp_xmit(skb, net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) return NVSP_STAT_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) napi_gro_receive(&nvchan->napi, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) return NVSP_STAT_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) static void netvsc_get_drvinfo(struct net_device *net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) struct ethtool_drvinfo *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960)
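/* ethtool -l handler: report the maximum and currently configured number
 * of combined channels.
 */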
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) static void netvsc_get_channels(struct net_device *net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) struct ethtool_channels *channel)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) struct net_device_context *net_device_ctx = netdev_priv(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) if (nvdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) channel->max_combined = nvdev->max_chn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) channel->combined_count = nvdev->num_chn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) /* Allocate a struct netvsc_device_info and initialize it either from an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) * existing struct netvsc_device or from default values.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) static
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) struct netvsc_device_info *netvsc_devinfo_get(struct netvsc_device *nvdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) struct netvsc_device_info *dev_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) struct bpf_prog *prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) dev_info = kzalloc(sizeof(*dev_info), GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) if (!dev_info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) if (nvdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) ASSERT_RTNL();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) dev_info->num_chn = nvdev->num_chn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) dev_info->send_sections = nvdev->send_section_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) dev_info->send_section_size = nvdev->send_section_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) dev_info->recv_sections = nvdev->recv_section_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) dev_info->recv_section_size = nvdev->recv_section_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) memcpy(dev_info->rss_key, nvdev->extension->rss_key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) NETVSC_HASH_KEYLEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) prog = netvsc_xdp_get(nvdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) if (prog) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) bpf_prog_inc(prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) dev_info->bprog = prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) dev_info->num_chn = VRSS_CHANNEL_DEFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) dev_info->send_sections = NETVSC_DEFAULT_TX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) dev_info->send_section_size = NETVSC_SEND_SECTION_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) dev_info->recv_sections = NETVSC_DEFAULT_RX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) dev_info->recv_section_size = NETVSC_RECV_SECTION_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) return dev_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) /* Free struct netvsc_device_info */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) static void netvsc_devinfo_put(struct netvsc_device_info *dev_info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) if (dev_info->bprog) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) ASSERT_RTNL();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) bpf_prog_put(dev_info->bprog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) kfree(dev_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025)
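/* Quiesce and remove the synthetic datapath before reconfiguration:
 * stop any pending sub-channel setup, detach the XDP program, close the
 * RNDIS filter and wait for the ring buffers to drain, then detach the
 * netdev and remove the RNDIS device.
 */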
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) static int netvsc_detach(struct net_device *ndev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) struct netvsc_device *nvdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) struct net_device_context *ndev_ctx = netdev_priv(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) struct hv_device *hdev = ndev_ctx->device_ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) /* Don't continue trying to set up sub-channels */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) if (cancel_work_sync(&nvdev->subchan_work))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) nvdev->num_chn = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) netvsc_xdp_set(ndev, NULL, NULL, nvdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) /* If the device was up (receiving), shut it down */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) if (netif_running(ndev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) netvsc_tx_disable(nvdev, ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) ret = rndis_filter_close(nvdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) netdev_err(ndev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) "unable to close device (ret %d).\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) ret = netvsc_wait_until_empty(nvdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) netdev_err(ndev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) "Ring buffer not empty after closing rndis\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) netif_device_detach(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) rndis_filter_device_remove(hdev, nvdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064)
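/* Recreate the synthetic datapath with the settings in @dev_info: add the
 * RNDIS device, set up sub-channels (falling back to a single queue on
 * failure), reinstall any XDP program saved in @dev_info, and re-open the
 * filter if the interface is running. Counterpart of netvsc_detach().
 */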
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) static int netvsc_attach(struct net_device *ndev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) struct netvsc_device_info *dev_info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) struct net_device_context *ndev_ctx = netdev_priv(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) struct hv_device *hdev = ndev_ctx->device_ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) struct netvsc_device *nvdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) struct rndis_device *rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) struct bpf_prog *prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) nvdev = rndis_filter_device_add(hdev, dev_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) if (IS_ERR(nvdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) return PTR_ERR(nvdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) if (nvdev->num_chn > 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) ret = rndis_set_subchannel(ndev, nvdev, dev_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) /* if unavailable, just proceed with one queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) nvdev->max_chn = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) nvdev->num_chn = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) prog = dev_info->bprog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) if (prog) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) bpf_prog_inc(prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) ret = netvsc_xdp_set(ndev, prog, NULL, nvdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) bpf_prog_put(prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) goto err1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) /* In any case, the device is now ready */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) nvdev->tx_disable = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) netif_device_attach(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) /* Note: enable and attach happen when sub-channels are set up */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) netif_carrier_off(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) if (netif_running(ndev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) ret = rndis_filter_open(nvdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) goto err2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) rdev = nvdev->extension;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) if (!rdev->link_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) netif_carrier_on(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) err2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) netif_device_detach(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) err1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) rndis_filter_device_remove(hdev, nvdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126)
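/* ethtool -L handler: change the number of combined channels by detaching
 * and re-attaching the synthetic device with the new count, attempting to
 * restore the original count if the re-attach fails.
 */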
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) static int netvsc_set_channels(struct net_device *net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) struct ethtool_channels *channels)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) struct net_device_context *net_device_ctx = netdev_priv(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) unsigned int orig, count = channels->combined_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) struct netvsc_device_info *device_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) /* We do not support separate counts for rx, tx, or other */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) if (count == 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) channels->rx_count || channels->tx_count || channels->other_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) if (!nvdev || nvdev->destroy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) if (nvdev->nvsp_version < NVSP_PROTOCOL_VERSION_5)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) if (count > nvdev->max_chn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) orig = nvdev->num_chn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) device_info = netvsc_devinfo_get(nvdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) if (!device_info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) device_info->num_chn = count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) ret = netvsc_detach(net, nvdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) ret = netvsc_attach(net, device_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) device_info->num_chn = orig;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) if (netvsc_attach(net, device_info))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) netdev_err(net, "restoring channel setting failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) netvsc_devinfo_put(device_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174)
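/* Set the default L4 hash configuration, default link speed/duplex, and
 * the initial device feature flags for a new netvsc netdev.
 */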
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) static void netvsc_init_settings(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) struct net_device_context *ndc = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) ndc->l4_hash = HV_DEFAULT_L4HASH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) ndc->speed = SPEED_UNKNOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) ndc->duplex = DUPLEX_FULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) dev->features = NETIF_F_LRO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186)
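/* ethtool get_link_ksettings: defer to the bound VF when one is present,
 * otherwise report the locally stored speed and duplex.
 */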
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) static int netvsc_get_link_ksettings(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) struct ethtool_link_ksettings *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) struct net_device_context *ndc = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) struct net_device *vf_netdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) vf_netdev = rtnl_dereference(ndc->vf_netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) if (vf_netdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) return __ethtool_get_link_ksettings(vf_netdev, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) cmd->base.speed = ndc->speed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) cmd->base.duplex = ndc->duplex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) cmd->base.port = PORT_OTHER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204)
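/* ethtool set_link_ksettings: pass the request through to the bound VF
 * when present, otherwise record the requested speed/duplex locally.
 */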
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) static int netvsc_set_link_ksettings(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) const struct ethtool_link_ksettings *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) struct net_device_context *ndc = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) struct net_device *vf_netdev = rtnl_dereference(ndc->vf_netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) if (vf_netdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) if (!vf_netdev->ethtool_ops->set_link_ksettings)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) return vf_netdev->ethtool_ops->set_link_ksettings(vf_netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) return ethtool_virtdev_set_link_ksettings(dev, cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) &ndc->speed, &ndc->duplex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222)
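/* Change the MTU: update the underlying VF first (if any), then detach
 * and re-attach the synthetic device so the new MTU takes effect. On
 * failure, attempt to restore the original MTU on both devices.
 */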
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) static int netvsc_change_mtu(struct net_device *ndev, int mtu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) struct net_device_context *ndevctx = netdev_priv(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) struct net_device *vf_netdev = rtnl_dereference(ndevctx->vf_netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) int orig_mtu = ndev->mtu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) struct netvsc_device_info *device_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) if (!nvdev || nvdev->destroy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) device_info = netvsc_devinfo_get(nvdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) if (!device_info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) /* Change MTU of underlying VF netdev first. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) if (vf_netdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) ret = dev_set_mtu(vf_netdev, mtu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) ret = netvsc_detach(ndev, nvdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) goto rollback_vf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) ndev->mtu = mtu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) ret = netvsc_attach(ndev, device_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) /* Attempt rollback to original MTU */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) ndev->mtu = orig_mtu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) if (netvsc_attach(ndev, device_info))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) netdev_err(ndev, "restoring mtu failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) rollback_vf:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) if (vf_netdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) dev_set_mtu(vf_netdev, orig_mtu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) netvsc_devinfo_put(device_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270)
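/* Aggregate the per-cpu VF datapath counters into @tot, taking a
 * consistent u64_stats snapshot of each CPU's values.
 */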
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) static void netvsc_get_vf_stats(struct net_device *net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) struct netvsc_vf_pcpu_stats *tot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) struct net_device_context *ndev_ctx = netdev_priv(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) memset(tot, 0, sizeof(*tot));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) for_each_possible_cpu(i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) const struct netvsc_vf_pcpu_stats *stats
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) = per_cpu_ptr(ndev_ctx->vf_stats, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) unsigned int start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) start = u64_stats_fetch_begin_irq(&stats->syncp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) rx_packets = stats->rx_packets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) tx_packets = stats->tx_packets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) rx_bytes = stats->rx_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) tx_bytes = stats->tx_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) tot->rx_packets += rx_packets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) tot->tx_packets += tx_packets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) tot->rx_bytes += rx_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) tot->tx_bytes += tx_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) tot->tx_dropped += stats->tx_dropped;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300)
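/* Collect per-cpu totals for ethtool: VF counters are indexed by the cpu
 * they were counted on, and synthetic per-channel counters are attributed
 * to each channel's target cpu.
 */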
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) static void netvsc_get_pcpu_stats(struct net_device *net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) struct netvsc_ethtool_pcpu_stats *pcpu_tot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) struct net_device_context *ndev_ctx = netdev_priv(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) struct netvsc_device *nvdev = rcu_dereference_rtnl(ndev_ctx->nvdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) /* fetch percpu stats of vf */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) for_each_possible_cpu(i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) const struct netvsc_vf_pcpu_stats *stats =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) per_cpu_ptr(ndev_ctx->vf_stats, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) struct netvsc_ethtool_pcpu_stats *this_tot = &pcpu_tot[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) unsigned int start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) start = u64_stats_fetch_begin_irq(&stats->syncp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) this_tot->vf_rx_packets = stats->rx_packets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) this_tot->vf_tx_packets = stats->tx_packets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) this_tot->vf_rx_bytes = stats->rx_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) this_tot->vf_tx_bytes = stats->tx_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) this_tot->rx_packets = this_tot->vf_rx_packets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) this_tot->tx_packets = this_tot->vf_tx_packets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) this_tot->rx_bytes = this_tot->vf_rx_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) this_tot->tx_bytes = this_tot->vf_tx_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) /* fetch percpu stats of netvsc */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) for (i = 0; i < nvdev->num_chn; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) const struct netvsc_channel *nvchan = &nvdev->chan_table[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) const struct netvsc_stats *stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) struct netvsc_ethtool_pcpu_stats *this_tot =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) &pcpu_tot[nvchan->channel->target_cpu];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) u64 packets, bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) unsigned int start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) stats = &nvchan->tx_stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) start = u64_stats_fetch_begin_irq(&stats->syncp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) packets = stats->packets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) bytes = stats->bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) this_tot->tx_bytes += bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) this_tot->tx_packets += packets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) stats = &nvchan->rx_stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) start = u64_stats_fetch_begin_irq(&stats->syncp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) packets = stats->packets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) bytes = stats->bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) this_tot->rx_bytes += bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) this_tot->rx_packets += packets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358)
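/* ndo_get_stats64: start from the netdev software counters, then add the
 * accumulated VF totals and the per-channel synthetic rx/tx counters.
 */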
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) static void netvsc_get_stats64(struct net_device *net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) struct rtnl_link_stats64 *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) struct net_device_context *ndev_ctx = netdev_priv(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) struct netvsc_device *nvdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) struct netvsc_vf_pcpu_stats vf_tot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) nvdev = rcu_dereference(ndev_ctx->nvdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) if (!nvdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) netdev_stats_to_stats64(t, &net->stats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) netvsc_get_vf_stats(net, &vf_tot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) t->rx_packets += vf_tot.rx_packets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) t->tx_packets += vf_tot.tx_packets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) t->rx_bytes += vf_tot.rx_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) t->tx_bytes += vf_tot.tx_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) t->tx_dropped += vf_tot.tx_dropped;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) for (i = 0; i < nvdev->num_chn; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) const struct netvsc_channel *nvchan = &nvdev->chan_table[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) const struct netvsc_stats *stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) u64 packets, bytes, multicast;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) unsigned int start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) stats = &nvchan->tx_stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) start = u64_stats_fetch_begin_irq(&stats->syncp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) packets = stats->packets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) bytes = stats->bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) t->tx_bytes += bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) t->tx_packets += packets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) stats = &nvchan->rx_stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) start = u64_stats_fetch_begin_irq(&stats->syncp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) packets = stats->packets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) bytes = stats->bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) multicast = stats->multicast + stats->broadcast;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) t->rx_bytes += bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) t->rx_packets += packets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) t->multicast += multicast;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413)
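/* Set a new MAC address: validate it, apply it to the VF first (if bound),
 * then program it on the host via RNDIS. If the host rejects the change,
 * restore the previous address on the VF.
 */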
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) static int netvsc_set_mac_addr(struct net_device *ndev, void *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) struct net_device_context *ndc = netdev_priv(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) struct net_device *vf_netdev = rtnl_dereference(ndc->vf_netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) struct sockaddr *addr = p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) err = eth_prepare_mac_addr_change(ndev, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) if (!nvdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) if (vf_netdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) err = dev_set_mac_address(vf_netdev, addr, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) err = rndis_filter_set_device_mac(nvdev, addr->sa_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) if (!err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) eth_commit_mac_addr_change(ndev, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) } else if (vf_netdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) /* rollback change on VF */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) memcpy(addr->sa_data, ndev->dev_addr, ETH_ALEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) dev_set_mac_address(vf_netdev, addr, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) static const struct {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) char name[ETH_GSTRING_LEN];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) u16 offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) } netvsc_stats[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) { "tx_scattered", offsetof(struct netvsc_ethtool_stats, tx_scattered) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) { "tx_no_memory", offsetof(struct netvsc_ethtool_stats, tx_no_memory) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) { "tx_no_space", offsetof(struct netvsc_ethtool_stats, tx_no_space) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) { "tx_too_big", offsetof(struct netvsc_ethtool_stats, tx_too_big) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) { "tx_busy", offsetof(struct netvsc_ethtool_stats, tx_busy) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) { "tx_send_full", offsetof(struct netvsc_ethtool_stats, tx_send_full) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) { "rx_comp_busy", offsetof(struct netvsc_ethtool_stats, rx_comp_busy) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) { "rx_no_memory", offsetof(struct netvsc_ethtool_stats, rx_no_memory) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) { "stop_queue", offsetof(struct netvsc_ethtool_stats, stop_queue) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) { "wake_queue", offsetof(struct netvsc_ethtool_stats, wake_queue) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) { "vlan_error", offsetof(struct netvsc_ethtool_stats, vlan_error) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) }, pcpu_stats[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) { "cpu%u_rx_packets",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) offsetof(struct netvsc_ethtool_pcpu_stats, rx_packets) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) { "cpu%u_rx_bytes",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) offsetof(struct netvsc_ethtool_pcpu_stats, rx_bytes) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) { "cpu%u_tx_packets",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) offsetof(struct netvsc_ethtool_pcpu_stats, tx_packets) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) { "cpu%u_tx_bytes",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) offsetof(struct netvsc_ethtool_pcpu_stats, tx_bytes) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) { "cpu%u_vf_rx_packets",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) offsetof(struct netvsc_ethtool_pcpu_stats, vf_rx_packets) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) { "cpu%u_vf_rx_bytes",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) offsetof(struct netvsc_ethtool_pcpu_stats, vf_rx_bytes) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) { "cpu%u_vf_tx_packets",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) offsetof(struct netvsc_ethtool_pcpu_stats, vf_tx_packets) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) { "cpu%u_vf_tx_bytes",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) offsetof(struct netvsc_ethtool_pcpu_stats, vf_tx_bytes) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) }, vf_stats[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) { "vf_rx_packets", offsetof(struct netvsc_vf_pcpu_stats, rx_packets) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) { "vf_rx_bytes", offsetof(struct netvsc_vf_pcpu_stats, rx_bytes) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) { "vf_tx_packets", offsetof(struct netvsc_vf_pcpu_stats, tx_packets) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) { "vf_tx_bytes", offsetof(struct netvsc_vf_pcpu_stats, tx_bytes) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) { "vf_tx_dropped", offsetof(struct netvsc_vf_pcpu_stats, tx_dropped) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) #define NETVSC_GLOBAL_STATS_LEN ARRAY_SIZE(netvsc_stats)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) #define NETVSC_VF_STATS_LEN ARRAY_SIZE(vf_stats)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) /* statistics per present CPU (rx/tx packets/bytes, plus VF counterparts) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) #define NETVSC_PCPU_STATS_LEN (num_present_cpus() * ARRAY_SIZE(pcpu_stats))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) /* 5 statistics per queue (rx/tx packets/bytes, rx xdp_drop) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) #define NETVSC_QUEUE_STATS_LEN(dev) ((dev)->num_chn * 5)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495)
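/* Return the number of ethtool statistics exposed: global driver counters,
 * VF totals, per-queue counters and per-cpu counters.
 */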
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) static int netvsc_get_sset_count(struct net_device *dev, int string_set)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) struct net_device_context *ndc = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) if (!nvdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) switch (string_set) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) case ETH_SS_STATS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) return NETVSC_GLOBAL_STATS_LEN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) + NETVSC_VF_STATS_LEN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) + NETVSC_QUEUE_STATS_LEN(nvdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) + NETVSC_PCPU_STATS_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514)
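/* Fill the ethtool statistics values in the same order that
 * netvsc_get_strings() emits the names: global, VF, per-queue, then
 * per-cpu.
 */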
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) static void netvsc_get_ethtool_stats(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) struct ethtool_stats *stats, u64 *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) struct net_device_context *ndc = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) const void *nds = &ndc->eth_stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) const struct netvsc_stats *qstats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) struct netvsc_vf_pcpu_stats sum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) struct netvsc_ethtool_pcpu_stats *pcpu_sum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) unsigned int start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) u64 packets, bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) u64 xdp_drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) int i, j, cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) if (!nvdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) for (i = 0; i < NETVSC_GLOBAL_STATS_LEN; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) data[i] = *(unsigned long *)(nds + netvsc_stats[i].offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) netvsc_get_vf_stats(dev, &sum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) for (j = 0; j < NETVSC_VF_STATS_LEN; j++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) data[i++] = *(u64 *)((void *)&sum + vf_stats[j].offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) for (j = 0; j < nvdev->num_chn; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) qstats = &nvdev->chan_table[j].tx_stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) start = u64_stats_fetch_begin_irq(&qstats->syncp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) packets = qstats->packets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) bytes = qstats->bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) } while (u64_stats_fetch_retry_irq(&qstats->syncp, start));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) data[i++] = packets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) data[i++] = bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) qstats = &nvdev->chan_table[j].rx_stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) start = u64_stats_fetch_begin_irq(&qstats->syncp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) packets = qstats->packets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) bytes = qstats->bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) xdp_drop = qstats->xdp_drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) } while (u64_stats_fetch_retry_irq(&qstats->syncp, start));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) data[i++] = packets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) data[i++] = bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) data[i++] = xdp_drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) pcpu_sum = kvmalloc_array(num_possible_cpus(),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) sizeof(struct netvsc_ethtool_pcpu_stats),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) if (!pcpu_sum)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) netvsc_get_pcpu_stats(dev, pcpu_sum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) for_each_present_cpu(cpu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) struct netvsc_ethtool_pcpu_stats *this_sum = &pcpu_sum[cpu];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) for (j = 0; j < ARRAY_SIZE(pcpu_stats); j++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) data[i++] = *(u64 *)((void *)this_sum
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) + pcpu_stats[j].offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) kvfree(pcpu_sum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578)
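/* Emit the ethtool statistics name strings; the order must match the
 * values filled in by netvsc_get_ethtool_stats().
 */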
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) static void netvsc_get_strings(struct net_device *dev, u32 stringset, u8 *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) struct net_device_context *ndc = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) u8 *p = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) int i, cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) if (!nvdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) switch (stringset) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) case ETH_SS_STATS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) for (i = 0; i < ARRAY_SIZE(netvsc_stats); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) memcpy(p, netvsc_stats[i].name, ETH_GSTRING_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) p += ETH_GSTRING_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) for (i = 0; i < ARRAY_SIZE(vf_stats); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) memcpy(p, vf_stats[i].name, ETH_GSTRING_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) p += ETH_GSTRING_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) for (i = 0; i < nvdev->num_chn; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) sprintf(p, "tx_queue_%u_packets", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) p += ETH_GSTRING_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) sprintf(p, "tx_queue_%u_bytes", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) p += ETH_GSTRING_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) sprintf(p, "rx_queue_%u_packets", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) p += ETH_GSTRING_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) sprintf(p, "rx_queue_%u_bytes", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) p += ETH_GSTRING_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) sprintf(p, "rx_queue_%u_xdp_drop", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) p += ETH_GSTRING_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) for_each_present_cpu(cpu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) for (i = 0; i < ARRAY_SIZE(pcpu_stats); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) sprintf(p, pcpu_stats[i].name, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) p += ETH_GSTRING_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624)
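/* Report the fields used for RSS hashing of the given flow type: IP source
 * and destination are always hashed; L4 ports are added for TCP/UDP flows
 * when the corresponding bit in ndc->l4_hash is set.
 */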
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) netvsc_get_rss_hash_opts(struct net_device_context *ndc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) struct ethtool_rxnfc *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) const u32 l4_flag = RXH_L4_B_0_1 | RXH_L4_B_2_3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) info->data = RXH_IP_SRC | RXH_IP_DST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) switch (info->flow_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) case TCP_V4_FLOW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) if (ndc->l4_hash & HV_TCP4_L4HASH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) info->data |= l4_flag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) case TCP_V6_FLOW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) if (ndc->l4_hash & HV_TCP6_L4HASH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) info->data |= l4_flag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) case UDP_V4_FLOW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) if (ndc->l4_hash & HV_UDP4_L4HASH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) info->data |= l4_flag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) case UDP_V6_FLOW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) if (ndc->l4_hash & HV_UDP6_L4HASH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) info->data |= l4_flag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) case IPV4_FLOW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) case IPV6_FLOW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) info->data = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668)
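/* Get RX network flow classification: report the number of receive
 * channels (ETHTOOL_GRXRINGS) or the current RSS hash fields (ETHTOOL_GRXFH).
 */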
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) netvsc_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) u32 *rules)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) struct net_device_context *ndc = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) if (!nvdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) switch (info->cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) case ETHTOOL_GRXRINGS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) info->data = nvdev->num_chn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) case ETHTOOL_GRXFH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) return netvsc_get_rss_hash_opts(ndc, info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689)
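/* Update the RSS hash fields for a flow type. Only two configurations are
 * accepted: IP addresses plus L4 ports (sets the per-protocol bit in
 * ndc->l4_hash) or IP addresses only (clears it).
 */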
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) static int netvsc_set_rss_hash_opts(struct net_device_context *ndc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) struct ethtool_rxnfc *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) if (info->data == (RXH_IP_SRC | RXH_IP_DST |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) switch (info->flow_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) case TCP_V4_FLOW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) ndc->l4_hash |= HV_TCP4_L4HASH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) case TCP_V6_FLOW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) ndc->l4_hash |= HV_TCP6_L4HASH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) case UDP_V4_FLOW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) ndc->l4_hash |= HV_UDP4_L4HASH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) case UDP_V6_FLOW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) ndc->l4_hash |= HV_UDP6_L4HASH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) if (info->data == (RXH_IP_SRC | RXH_IP_DST)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) switch (info->flow_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) case TCP_V4_FLOW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) ndc->l4_hash &= ~HV_TCP4_L4HASH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) case TCP_V6_FLOW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) ndc->l4_hash &= ~HV_TCP6_L4HASH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) case UDP_V4_FLOW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) ndc->l4_hash &= ~HV_UDP4_L4HASH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) case UDP_V6_FLOW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) ndc->l4_hash &= ~HV_UDP6_L4HASH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) netvsc_set_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) struct net_device_context *ndc = netdev_priv(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) if (info->cmd == ETHTOOL_SRXFH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) return netvsc_set_rss_hash_opts(ndc, info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) static u32 netvsc_get_rxfh_key_size(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) return NETVSC_HASH_KEYLEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) static u32 netvsc_rss_indir_size(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) return ITAB_NUM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767)
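/* Report the current RSS configuration: Toeplitz hash function, the
 * indirection table from ndc->rx_table and the key kept in the RNDIS
 * device extension.
 */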
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) static int netvsc_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) u8 *hfunc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) struct net_device_context *ndc = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) struct netvsc_device *ndev = rtnl_dereference(ndc->nvdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) struct rndis_device *rndis_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) if (!ndev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) if (hfunc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) *hfunc = ETH_RSS_HASH_TOP; /* Toeplitz */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) rndis_dev = ndev->extension;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) if (indir) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) for (i = 0; i < ITAB_NUM; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) indir[i] = ndc->rx_table[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) if (key)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) memcpy(key, rndis_dev->rss_key, NETVSC_HASH_KEYLEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793)
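/* Apply a new RSS configuration. Only the Toeplitz hash is supported and
 * every indirection table entry must reference an existing channel. If no
 * key is supplied but the table changed, the existing key is reused for the
 * rndis_filter_set_rss_param() call.
 */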
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) static int netvsc_set_rxfh(struct net_device *dev, const u32 *indir,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) const u8 *key, const u8 hfunc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) struct net_device_context *ndc = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) struct netvsc_device *ndev = rtnl_dereference(ndc->nvdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) struct rndis_device *rndis_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) if (!ndev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) rndis_dev = ndev->extension;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) if (indir) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) for (i = 0; i < ITAB_NUM; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) if (indir[i] >= ndev->num_chn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) for (i = 0; i < ITAB_NUM; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) ndc->rx_table[i] = indir[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) if (!key) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) if (!indir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) key = rndis_dev->rss_key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) return rndis_filter_set_rss_param(rndis_dev, key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) /* The Hyper-V RNDIS protocol does not have a ring in the HW sense.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) * It does have a pre-allocated receive area which is divided into sections.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) static void __netvsc_get_ringparam(struct netvsc_device *nvdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) struct ethtool_ringparam *ring)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) u32 max_buf_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) ring->rx_pending = nvdev->recv_section_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) ring->tx_pending = nvdev->send_section_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) if (nvdev->nvsp_version <= NVSP_PROTOCOL_VERSION_2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) max_buf_size = NETVSC_RECEIVE_BUFFER_SIZE_LEGACY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) max_buf_size = NETVSC_RECEIVE_BUFFER_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) ring->rx_max_pending = max_buf_size / nvdev->recv_section_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) ring->tx_max_pending = NETVSC_SEND_BUFFER_SIZE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) / nvdev->send_section_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) static void netvsc_get_ringparam(struct net_device *ndev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) struct ethtool_ringparam *ring)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) struct net_device_context *ndevctx = netdev_priv(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) if (!nvdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) __netvsc_get_ringparam(nvdev, ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860)
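/* Change the number of send/receive buffer sections (e.g. via ethtool -G).
 * The requested values are clamped to the supported range; if they differ
 * from the current ones, the device is detached and re-attached with the new
 * counts, and the original settings are restored if the re-attach fails.
 */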
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) static int netvsc_set_ringparam(struct net_device *ndev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) struct ethtool_ringparam *ring)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) struct net_device_context *ndevctx = netdev_priv(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) struct netvsc_device_info *device_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) struct ethtool_ringparam orig;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) u32 new_tx, new_rx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) if (!nvdev || nvdev->destroy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) memset(&orig, 0, sizeof(orig));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) __netvsc_get_ringparam(nvdev, &orig);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) new_tx = clamp_t(u32, ring->tx_pending,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) NETVSC_MIN_TX_SECTIONS, orig.tx_max_pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) new_rx = clamp_t(u32, ring->rx_pending,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) NETVSC_MIN_RX_SECTIONS, orig.rx_max_pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) if (new_tx == orig.tx_pending &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) new_rx == orig.rx_pending)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) return 0; /* no change */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) device_info = netvsc_devinfo_get(nvdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) if (!device_info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) device_info->send_sections = new_tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) device_info->recv_sections = new_rx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) ret = netvsc_detach(ndev, nvdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) ret = netvsc_attach(ndev, device_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) device_info->send_sections = orig.tx_pending;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) device_info->recv_sections = orig.rx_pending;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) if (netvsc_attach(ndev, device_info))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) netdev_err(ndev, "restoring ringparam failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) netvsc_devinfo_put(device_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911)
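/* Mask out LRO from the requested features while an XDP program is attached,
 * since LRO is not supported together with XDP here.
 */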
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) static netdev_features_t netvsc_fix_features(struct net_device *ndev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) netdev_features_t features)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) struct net_device_context *ndevctx = netdev_priv(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) if (!nvdev || nvdev->destroy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) return features;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) if ((features & NETIF_F_LRO) && netvsc_xdp_get(nvdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) features ^= NETIF_F_LRO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) netdev_info(ndev, "Skip LRO - unsupported with XDP\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) return features;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928)
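/* Toggle host receive segment coalescing (LRO) through an RNDIS offload
 * parameters request when NETIF_F_LRO changes, then propagate the wanted
 * features to the slave VF device if one is present.
 */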
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) static int netvsc_set_features(struct net_device *ndev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) netdev_features_t features)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) netdev_features_t change = features ^ ndev->features;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) struct net_device_context *ndevctx = netdev_priv(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) struct net_device *vf_netdev = rtnl_dereference(ndevctx->vf_netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) struct ndis_offload_params offloads;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) if (!nvdev || nvdev->destroy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) if (!(change & NETIF_F_LRO))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) goto syncvf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) memset(&offloads, 0, sizeof(struct ndis_offload_params));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) if (features & NETIF_F_LRO) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) offloads.rsc_ip_v4 = NDIS_OFFLOAD_PARAMETERS_RSC_ENABLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) offloads.rsc_ip_v6 = NDIS_OFFLOAD_PARAMETERS_RSC_ENABLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) offloads.rsc_ip_v4 = NDIS_OFFLOAD_PARAMETERS_RSC_DISABLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) offloads.rsc_ip_v6 = NDIS_OFFLOAD_PARAMETERS_RSC_DISABLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) ret = rndis_filter_set_offload_params(ndev, nvdev, &offloads);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) features ^= NETIF_F_LRO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) ndev->features = features;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) syncvf:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) if (!vf_netdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) vf_netdev->wanted_features = features;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) netdev_update_features(vf_netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971)
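/* ethtool register dump: expose the transmit indirection table
 * (ndc->tx_table) as the device "registers".
 */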
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) static int netvsc_get_regs_len(struct net_device *netdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) return VRSS_SEND_TAB_SIZE * sizeof(u32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) static void netvsc_get_regs(struct net_device *netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) struct ethtool_regs *regs, void *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) struct net_device_context *ndc = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) u32 *regs_buff = p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) /* Increase the version if the buffer format is changed. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) regs->version = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) memcpy(regs_buff, ndc->tx_table, VRSS_SEND_TAB_SIZE * sizeof(u32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) static u32 netvsc_get_msglevel(struct net_device *ndev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) struct net_device_context *ndev_ctx = netdev_priv(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) return ndev_ctx->msg_enable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) static void netvsc_set_msglevel(struct net_device *ndev, u32 val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) struct net_device_context *ndev_ctx = netdev_priv(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) ndev_ctx->msg_enable = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) static const struct ethtool_ops ethtool_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) .get_drvinfo = netvsc_get_drvinfo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) .get_regs_len = netvsc_get_regs_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) .get_regs = netvsc_get_regs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) .get_msglevel = netvsc_get_msglevel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) .set_msglevel = netvsc_set_msglevel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) .get_link = ethtool_op_get_link,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) .get_ethtool_stats = netvsc_get_ethtool_stats,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) .get_sset_count = netvsc_get_sset_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) .get_strings = netvsc_get_strings,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) .get_channels = netvsc_get_channels,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) .set_channels = netvsc_set_channels,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) .get_ts_info = ethtool_op_get_ts_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) .get_rxnfc = netvsc_get_rxnfc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) .set_rxnfc = netvsc_set_rxnfc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) .get_rxfh_key_size = netvsc_get_rxfh_key_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) .get_rxfh_indir_size = netvsc_rss_indir_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) .get_rxfh = netvsc_get_rxfh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) .set_rxfh = netvsc_set_rxfh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) .get_link_ksettings = netvsc_get_link_ksettings,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) .set_link_ksettings = netvsc_set_link_ksettings,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) .get_ringparam = netvsc_get_ringparam,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) .set_ringparam = netvsc_set_ringparam,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) static const struct net_device_ops device_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) .ndo_open = netvsc_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) .ndo_stop = netvsc_close,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) .ndo_start_xmit = netvsc_start_xmit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) .ndo_change_rx_flags = netvsc_change_rx_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) .ndo_set_rx_mode = netvsc_set_rx_mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) .ndo_fix_features = netvsc_fix_features,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) .ndo_set_features = netvsc_set_features,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) .ndo_change_mtu = netvsc_change_mtu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) .ndo_validate_addr = eth_validate_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) .ndo_set_mac_address = netvsc_set_mac_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) .ndo_select_queue = netvsc_select_queue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) .ndo_get_stats64 = netvsc_get_stats64,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) .ndo_bpf = netvsc_bpf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) * Handle link status changes. For RNDIS_STATUS_NETWORK_CHANGE emulate a link
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) * down/up sequence. For RNDIS_STATUS_MEDIA_CONNECT, when a carrier is already
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) * present, send a GARP packet to network peers with netdev_notify_peers().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) static void netvsc_link_change(struct work_struct *w)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) struct net_device_context *ndev_ctx =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) container_of(w, struct net_device_context, dwork.work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) struct hv_device *device_obj = ndev_ctx->device_ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) struct net_device *net = hv_get_drvdata(device_obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) struct netvsc_device *net_device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) struct rndis_device *rdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) struct netvsc_reconfig *event = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) bool notify = false, reschedule = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) unsigned long flags, next_reconfig, delay;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) /* if changes are happening, come back later */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) if (!rtnl_trylock()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) schedule_delayed_work(&ndev_ctx->dwork, LINKCHANGE_INT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) net_device = rtnl_dereference(ndev_ctx->nvdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) if (!net_device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) rdev = net_device->extension;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) next_reconfig = ndev_ctx->last_reconfig + LINKCHANGE_INT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) if (time_is_after_jiffies(next_reconfig)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) /* link_watch only sends one notification with the current state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) * per second, so avoid doing reconfig more frequently. Handle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) * jiffies wrap around.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) delay = next_reconfig - jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) delay = delay < LINKCHANGE_INT ? delay : LINKCHANGE_INT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) schedule_delayed_work(&ndev_ctx->dwork, delay);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) ndev_ctx->last_reconfig = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) spin_lock_irqsave(&ndev_ctx->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) if (!list_empty(&ndev_ctx->reconfig_events)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) event = list_first_entry(&ndev_ctx->reconfig_events,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) struct netvsc_reconfig, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) list_del(&event->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) reschedule = !list_empty(&ndev_ctx->reconfig_events);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) spin_unlock_irqrestore(&ndev_ctx->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) if (!event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) switch (event->event) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) /* Only the following events are possible due to the check in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) * netvsc_linkstatus_callback()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) case RNDIS_STATUS_MEDIA_CONNECT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) if (rdev->link_state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) rdev->link_state = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) netif_carrier_on(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) netvsc_tx_enable(net_device, net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) notify = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) kfree(event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) case RNDIS_STATUS_MEDIA_DISCONNECT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) if (!rdev->link_state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) rdev->link_state = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) netif_carrier_off(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) netvsc_tx_disable(net_device, net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) kfree(event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) case RNDIS_STATUS_NETWORK_CHANGE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) /* Only makes sense if carrier is present */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) if (!rdev->link_state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) rdev->link_state = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) netif_carrier_off(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) netvsc_tx_disable(net_device, net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) event->event = RNDIS_STATUS_MEDIA_CONNECT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) spin_lock_irqsave(&ndev_ctx->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) list_add(&event->list, &ndev_ctx->reconfig_events);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) spin_unlock_irqrestore(&ndev_ctx->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) reschedule = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) if (notify)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) netdev_notify_peers(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) /* link_watch only sends one notification with current state per
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) * second, handle next reconfig event in 2 seconds.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) if (reschedule)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) schedule_delayed_work(&ndev_ctx->dwork, LINKCHANGE_INT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151)
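/* Given a VF netdev, return its netvsc master device, or NULL if the master
 * is not a netvsc device or has already been removed.
 */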
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) static struct net_device *get_netvsc_byref(struct net_device *vf_netdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) struct net_device_context *net_device_ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) struct net_device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) dev = netdev_master_upper_dev_get(vf_netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) if (!dev || dev->netdev_ops != &device_ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) return NULL; /* not a netvsc device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) net_device_ctx = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) if (!rtnl_dereference(net_device_ctx->nvdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) return NULL; /* device is removed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) return dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) /* Called when the VF is injecting data into the network stack.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) * Change the associated network device from the VF to netvsc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) * Note: already called with rcu_read_lock held.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) static rx_handler_result_t netvsc_vf_handle_frame(struct sk_buff **pskb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) struct sk_buff *skb = *pskb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) struct net_device *ndev = rcu_dereference(skb->dev->rx_handler_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) struct net_device_context *ndev_ctx = netdev_priv(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) struct netvsc_vf_pcpu_stats *pcpu_stats
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) = this_cpu_ptr(ndev_ctx->vf_stats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) skb = skb_share_check(skb, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) if (unlikely(!skb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) return RX_HANDLER_CONSUMED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) *pskb = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) skb->dev = ndev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) u64_stats_update_begin(&pcpu_stats->syncp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) pcpu_stats->rx_packets++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) pcpu_stats->rx_bytes += skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) u64_stats_update_end(&pcpu_stats->syncp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) return RX_HANDLER_ANOTHER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195)
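/* Attach a VF netdev to the netvsc device: register the RX handler that
 * redirects VF traffic to the synthetic device, link the VF as a slave,
 * and schedule the delayed work that completes the takeover.
 */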
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) static int netvsc_vf_join(struct net_device *vf_netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) struct net_device *ndev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) struct net_device_context *ndev_ctx = netdev_priv(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) ret = netdev_rx_handler_register(vf_netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) netvsc_vf_handle_frame, ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) if (ret != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) netdev_err(vf_netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) "can not register netvsc VF receive handler (err = %d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) goto rx_handler_failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) ret = netdev_master_upper_dev_link(vf_netdev, ndev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) NULL, NULL, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) if (ret != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) netdev_err(vf_netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) "can not set master device %s (err = %d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) ndev->name, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) goto upper_link_failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) /* set slave flag before open to prevent IPv6 addrconf */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) vf_netdev->flags |= IFF_SLAVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) schedule_delayed_work(&ndev_ctx->vf_takeover, VF_TAKEOVER_INT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) call_netdevice_notifiers(NETDEV_JOIN, vf_netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) netdev_info(vf_netdev, "joined to %s\n", ndev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) upper_link_failed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) netdev_rx_handler_unregister(vf_netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) rx_handler_failed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235)
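/* Bring the VF in line with the synthetic device: copy the MTU and flags,
 * sync the unicast/multicast address lists, and open the VF if the netvsc
 * interface is already running.
 */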
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) static void __netvsc_vf_setup(struct net_device *ndev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) struct net_device *vf_netdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) /* Align MTU of VF with master */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) ret = dev_set_mtu(vf_netdev, ndev->mtu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) netdev_warn(vf_netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) "unable to change mtu to %u\n", ndev->mtu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) /* set multicast and other flags on the VF */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) dev_change_flags(vf_netdev, ndev->flags | IFF_SLAVE, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) /* sync address list from ndev to VF */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) netif_addr_lock_bh(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) dev_uc_sync(vf_netdev, ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) dev_mc_sync(vf_netdev, ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) netif_addr_unlock_bh(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) if (netif_running(ndev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) ret = dev_open(vf_netdev, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) netdev_warn(vf_netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) "unable to open: %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) /* Set up the VF as a slave of the synthetic device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) * Runs in workqueue to avoid recursion in netlink callbacks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) static void netvsc_vf_setup(struct work_struct *w)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) struct net_device_context *ndev_ctx
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) = container_of(w, struct net_device_context, vf_takeover.work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) struct net_device *ndev = hv_get_drvdata(ndev_ctx->device_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) struct net_device *vf_netdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) if (!rtnl_trylock()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) schedule_delayed_work(&ndev_ctx->vf_takeover, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) if (vf_netdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) __netvsc_vf_setup(ndev, vf_netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) /* Find netvsc by VF serial number.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) * The PCI hyperv controller records the serial number as the slot kobj name.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) static struct net_device *get_netvsc_byslot(const struct net_device *vf_netdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) struct device *parent = vf_netdev->dev.parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) struct net_device_context *ndev_ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) struct pci_dev *pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) u32 serial;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) if (!parent || !dev_is_pci(parent))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) return NULL; /* not a PCI device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) pdev = to_pci_dev(parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) if (!pdev->slot) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) netdev_notice(vf_netdev, "no PCI slot information\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) if (kstrtou32(pci_slot_name(pdev->slot), 10, &serial)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) netdev_notice(vf_netdev, "Invalid vf serial:%s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) pci_slot_name(pdev->slot));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) list_for_each_entry(ndev_ctx, &netvsc_dev_list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) if (!ndev_ctx->vf_alloc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) if (ndev_ctx->vf_serial == serial)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) return hv_get_drvdata(ndev_ctx->device_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) netdev_notice(vf_netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) "no netdev found for vf serial:%u\n", serial);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323)
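/* Handle registration of a new VF netdev: find the matching netvsc device
 * by PCI slot serial number, move the VF into the same namespace if needed,
 * join it as a slave and inherit the features and any attached XDP program.
 */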
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) static int netvsc_register_vf(struct net_device *vf_netdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) struct net_device_context *net_device_ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) struct netvsc_device *netvsc_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) struct bpf_prog *prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) struct net_device *ndev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) if (vf_netdev->addr_len != ETH_ALEN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) return NOTIFY_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) ndev = get_netvsc_byslot(vf_netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) if (!ndev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) return NOTIFY_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) net_device_ctx = netdev_priv(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) netvsc_dev = rtnl_dereference(net_device_ctx->nvdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) if (!netvsc_dev || rtnl_dereference(net_device_ctx->vf_netdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) return NOTIFY_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) /* if the synthetic interface is in a different namespace,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) * then move the VF to that namespace; the join will be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) * done again in that context.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) if (!net_eq(dev_net(ndev), dev_net(vf_netdev))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) ret = dev_change_net_namespace(vf_netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) dev_net(ndev), "eth%d");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) netdev_err(vf_netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) "could not move to same namespace as %s: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) ndev->name, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) netdev_info(vf_netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) "VF moved to namespace with: %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) ndev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) return NOTIFY_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) netdev_info(ndev, "VF registering: %s\n", vf_netdev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) if (netvsc_vf_join(vf_netdev, ndev) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) return NOTIFY_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) dev_hold(vf_netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) rcu_assign_pointer(net_device_ctx->vf_netdev, vf_netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) vf_netdev->wanted_features = ndev->features;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) netdev_update_features(vf_netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) prog = netvsc_xdp_get(netvsc_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) netvsc_vf_setxdp(vf_netdev, prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) return NOTIFY_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) /* Change the data path when VF UP/DOWN/CHANGE are detected.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) * Typically a UP or DOWN event is followed by a CHANGE event, so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) * net_device_ctx->data_path_is_vf is used to cache the current data path
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) * to avoid the duplicate call of netvsc_switch_datapath() and the duplicate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) * message.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) * During hibernation, if a VF NIC driver (e.g. mlx5) preserves the network
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) * interface, there is only the CHANGE event and no UP or DOWN event.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) static int netvsc_vf_changed(struct net_device *vf_netdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) struct net_device_context *net_device_ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) struct netvsc_device *netvsc_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) struct net_device *ndev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) bool vf_is_up = netif_running(vf_netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) ndev = get_netvsc_byref(vf_netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) if (!ndev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) return NOTIFY_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) net_device_ctx = netdev_priv(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) netvsc_dev = rtnl_dereference(net_device_ctx->nvdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) if (!netvsc_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) return NOTIFY_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) if (net_device_ctx->data_path_is_vf == vf_is_up)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) return NOTIFY_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) net_device_ctx->data_path_is_vf = vf_is_up;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) netvsc_switch_datapath(ndev, vf_is_up);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) netdev_info(ndev, "Data path switched %s VF: %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) vf_is_up ? "to" : "from", vf_netdev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) return NOTIFY_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415)
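/* Undo netvsc_register_vf() when the VF goes away: remove the XDP program,
 * RX handler and upper-device link, and drop the reference on the VF.
 */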
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) static int netvsc_unregister_vf(struct net_device *vf_netdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) struct net_device *ndev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) struct net_device_context *net_device_ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) ndev = get_netvsc_byref(vf_netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) if (!ndev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) return NOTIFY_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) net_device_ctx = netdev_priv(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) cancel_delayed_work_sync(&net_device_ctx->vf_takeover);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) netdev_info(ndev, "VF unregistering: %s\n", vf_netdev->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429)
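/* Detach any XDP program that was copied to the VF. */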
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) netvsc_vf_setxdp(vf_netdev, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431)
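/* Undo the rx handler, upper-device link and reference taken when the VF registered. */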
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) netdev_rx_handler_unregister(vf_netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) netdev_upper_dev_unlink(vf_netdev, ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) RCU_INIT_POINTER(net_device_ctx->vf_netdev, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) dev_put(vf_netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) return NOTIFY_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) static int netvsc_probe(struct hv_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) const struct hv_vmbus_device_id *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) struct net_device *net = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) struct net_device_context *net_device_ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) struct netvsc_device_info *device_info = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) struct netvsc_device *nvdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) int ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448)
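/* Allocate the netdev together with the netvsc private context, sized
 * for the maximum number of queues the host may offer.
 */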
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) net = alloc_etherdev_mq(sizeof(struct net_device_context),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) VRSS_CHANNEL_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) if (!net)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) goto no_net;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453)
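/* Carrier stays off until the host reports the link as up. */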
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) netif_carrier_off(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) netvsc_init_settings(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) net_device_ctx = netdev_priv(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) net_device_ctx->device_ctx = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) net_device_ctx->msg_enable = netif_msg_init(debug, default_msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) if (netif_msg_probe(net_device_ctx))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) netdev_dbg(net, "netvsc msg_enable: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) net_device_ctx->msg_enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) hv_set_drvdata(dev, net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) spin_lock_init(&net_device_ctx->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) INIT_LIST_HEAD(&net_device_ctx->reconfig_events);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) INIT_DELAYED_WORK(&net_device_ctx->vf_takeover, netvsc_vf_setup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472)
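/* Per-CPU counters for traffic carried over the VF data path. */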
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) net_device_ctx->vf_stats
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) = netdev_alloc_pcpu_stats(struct netvsc_vf_pcpu_stats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) if (!net_device_ctx->vf_stats)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) goto no_stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) net->netdev_ops = &device_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) net->ethtool_ops = &ethtool_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) SET_NETDEV_DEV(net, &dev->device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) /* We always need headroom for rndis header */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) net->needed_headroom = RNDIS_AND_PPI_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) /* Initialize the number of queues to 1; it may be raised later if the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) * host offers more channels.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) netif_set_real_num_tx_queues(net, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) netif_set_real_num_rx_queues(net, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) /* Notify the netvsc driver of the new device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) device_info = netvsc_devinfo_get(NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) if (!device_info) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) goto devinfo_failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498)
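/* Create and initialize the RNDIS device; on success device_info also
 * holds the MAC address read from the host.
 */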
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) nvdev = rndis_filter_device_add(dev, device_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) if (IS_ERR(nvdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) ret = PTR_ERR(nvdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) goto rndis_failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) memcpy(net->dev_addr, device_info->mac_adr, ETH_ALEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) /* We must take the rtnl lock before scheduling nvdev->subchan_work,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) * otherwise netvsc_subchan_work() can take the rtnl lock first and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) * wait for all subchannels to show up. They may never show up, because
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) * netvsc_probe() cannot take the rtnl lock, and as a result
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) * vmbus_onoffer() -> ... -> device_add() -> ... -> __device_attach()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) * cannot take the device lock, so none of the subchannels can be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) * processed -- and netvsc_subchan_work() hangs forever.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) rtnl_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517)
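/* If the host offered more than one channel, open the subchannels from a work item. */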
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) if (nvdev->num_chn > 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) schedule_work(&nvdev->subchan_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) /* hw_features computed in rndis_netdev_set_hwcaps() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) net->features = net->hw_features |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) NETIF_F_HIGHDMA | NETIF_F_HW_VLAN_CTAG_TX |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) NETIF_F_HW_VLAN_CTAG_RX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) net->vlan_features = net->features;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) netdev_lockdep_set_classes(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) /* MTU range: 68 - 1500 or 65521 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) net->min_mtu = NETVSC_MTU_MIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) net->max_mtu = NETVSC_MTU - ETH_HLEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) net->max_mtu = ETH_DATA_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535)
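/* Device setup is complete; allow transmission. */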
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) nvdev->tx_disable = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) ret = register_netdevice(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) if (ret != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) pr_err("Unable to register netdev.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) goto register_failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) list_add(&net_device_ctx->list, &netvsc_dev_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) netvsc_devinfo_put(device_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549)
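/* Error unwind: release resources in the reverse order they were acquired. */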
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) register_failed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) rndis_filter_device_remove(dev, nvdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) rndis_failed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) netvsc_devinfo_put(device_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) devinfo_failed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) free_percpu(net_device_ctx->vf_stats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) no_stats:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) hv_set_drvdata(dev, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) free_netdev(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) no_net:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) static int netvsc_remove(struct hv_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) struct net_device_context *ndev_ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) struct net_device *vf_netdev, *net;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) struct netvsc_device *nvdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) net = hv_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) if (net == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) dev_err(&dev->device, "No net device to remove\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) ndev_ctx = netdev_priv(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577)
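/* Stop the link-change worker before tearing the device down. */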
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) cancel_delayed_work_sync(&ndev_ctx->dwork);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) rtnl_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) nvdev = rtnl_dereference(ndev_ctx->nvdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) if (nvdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) cancel_work_sync(&nvdev->subchan_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) netvsc_xdp_set(net, NULL, NULL, nvdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) * Call to the vsc driver to let it know that the device is being
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) * removed. Also blocks mtu and channel changes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) if (vf_netdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) netvsc_unregister_vf(vf_netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) if (nvdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) rndis_filter_device_remove(dev, nvdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) unregister_netdevice(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) list_del(&ndev_ctx->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) hv_set_drvdata(dev, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) free_percpu(ndev_ctx->vf_stats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) free_netdev(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) static int netvsc_suspend(struct hv_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) struct net_device_context *ndev_ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) struct netvsc_device *nvdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) struct net_device *net;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) net = hv_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) ndev_ctx = netdev_priv(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) cancel_delayed_work_sync(&ndev_ctx->dwork);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) rtnl_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) nvdev = rtnl_dereference(ndev_ctx->nvdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) if (nvdev == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) /* Save the current config info */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) ndev_ctx->saved_netvsc_dev_info = netvsc_devinfo_get(nvdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632)
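/* Tear down the netvsc device; netvsc_resume() re-creates it from the config saved above. */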
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) ret = netvsc_detach(net, nvdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) static int netvsc_resume(struct hv_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) struct net_device *net = hv_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) struct net_device_context *net_device_ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) struct netvsc_device_info *device_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) rtnl_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) net_device_ctx = netdev_priv(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) /* Reset the data path to the netvsc NIC before re-opening the vmbus
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) * channel. Later netvsc_netdev_event() will switch the data path to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) * the VF upon the UP or CHANGE event.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) net_device_ctx->data_path_is_vf = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) device_info = net_device_ctx->saved_netvsc_dev_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) ret = netvsc_attach(net, device_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) netvsc_devinfo_put(device_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) net_device_ctx->saved_netvsc_dev_info = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) static const struct hv_vmbus_device_id id_table[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) /* Network guid */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) { HV_NIC_GUID, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) { },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) MODULE_DEVICE_TABLE(vmbus, id_table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) /* The one and only netvsc driver instance */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) static struct hv_driver netvsc_drv = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) .name = KBUILD_MODNAME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) .id_table = id_table,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) .probe = netvsc_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) .remove = netvsc_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) .suspend = netvsc_suspend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) .resume = netvsc_resume,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) .driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) .probe_type = PROBE_FORCE_SYNCHRONOUS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) * On Hyper-V, every VF interface is matched with a corresponding
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) * synthetic interface. The synthetic interface is presented first
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) * to the guest. When the corresponding VF instance is registered,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) * we will take care of switching the data path.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) static int netvsc_netdev_event(struct notifier_block *this,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) unsigned long event, void *ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) /* Skip our own events */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) if (event_dev->netdev_ops == &device_ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) return NOTIFY_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) /* Avoid non-Ethernet type devices */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) if (event_dev->type != ARPHRD_ETHER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) return NOTIFY_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) /* Avoid Vlan dev with same MAC registering as VF */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) if (is_vlan_dev(event_dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) return NOTIFY_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) /* Avoid Bonding master dev with same MAC registering as VF */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) if ((event_dev->priv_flags & IFF_BONDING) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) (event_dev->flags & IFF_MASTER))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) return NOTIFY_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) switch (event) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) case NETDEV_REGISTER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) return netvsc_register_vf(event_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) case NETDEV_UNREGISTER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) return netvsc_unregister_vf(event_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) case NETDEV_UP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) case NETDEV_DOWN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) case NETDEV_CHANGE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) return netvsc_vf_changed(event_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) return NOTIFY_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) static struct notifier_block netvsc_netdev_notifier = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) .notifier_call = netvsc_netdev_event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) static void __exit netvsc_drv_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) unregister_netdevice_notifier(&netvsc_netdev_notifier);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) vmbus_driver_unregister(&netvsc_drv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) static int __init netvsc_drv_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) if (ring_size < RING_SIZE_MIN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) ring_size = RING_SIZE_MIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) pr_info("Increased ring_size to %u (min allowed)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) ring_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) }
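/* ring_size is in pages; the VMBus ring buffer size is in bytes. */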
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) netvsc_ring_bytes = ring_size * PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) ret = vmbus_driver_register(&netvsc_drv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) register_netdevice_notifier(&netvsc_netdev_notifier);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) MODULE_LICENSE("GPL");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) MODULE_DESCRIPTION("Microsoft Hyper-V network driver");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) module_init(netvsc_drv_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) module_exit(netvsc_drv_exit);