// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>
#include <linux/vmalloc.h>
#include <linux/rtnetlink.h>
#include <linux/prefetch.h>

#include <asm/sync_bitops.h>

#include "hyperv_net.h"
#include "netvsc_trace.h"

/*
 * Switch the data path between the synthetic interface and the VF
 * interface, depending on whether the VF data path is requested.
 */
void netvsc_switch_datapath(struct net_device *ndev, bool vf)
{
	struct net_device_context *net_device_ctx = netdev_priv(ndev);
	struct hv_device *dev = net_device_ctx->device_ctx;
	struct netvsc_device *nv_dev = rtnl_dereference(net_device_ctx->nvdev);
	struct nvsp_message *init_pkt = &nv_dev->channel_init_pkt;

	memset(init_pkt, 0, sizeof(struct nvsp_message));
	init_pkt->hdr.msg_type = NVSP_MSG4_TYPE_SWITCH_DATA_PATH;
	if (vf)
		init_pkt->msg.v4_msg.active_dp.active_datapath =
			NVSP_DATAPATH_VF;
	else
		init_pkt->msg.v4_msg.active_dp.active_datapath =
			NVSP_DATAPATH_SYNTHETIC;

	trace_nvsp_send(ndev, init_pkt);

	vmbus_sendpacket(dev->channel, init_pkt,
			 sizeof(struct nvsp_message),
			 (unsigned long)init_pkt,
			 VM_PKT_DATA_INBAND, 0);
}

/* Worker to set up sub-channels on initial setup.
 * The initial hotplug event occurs in softirq context
 * and can't wait for the channels to be set up.
 */
static void netvsc_subchan_work(struct work_struct *w)
{
	struct netvsc_device *nvdev =
		container_of(w, struct netvsc_device, subchan_work);
	struct rndis_device *rdev;
	int i, ret;

	/* Avoid deadlock with device removal already under RTNL */
	if (!rtnl_trylock()) {
		schedule_work(w);
		return;
	}

	rdev = nvdev->extension;
	if (rdev) {
		ret = rndis_set_subchannel(rdev->ndev, nvdev, NULL);
		if (ret == 0) {
			netif_device_attach(rdev->ndev);
		} else {
			/* fall back to using only the primary channel */
			for (i = 1; i < nvdev->num_chn; i++)
				netif_napi_del(&nvdev->chan_table[i].napi);

			nvdev->max_chn = 1;
			nvdev->num_chn = 1;
		}
	}

	rtnl_unlock();
}

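/* Allocate and initialize the per-device netvsc state. Transmit is left
 * disabled here and is enabled later once the rest of device setup
 * completes.
 */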
static struct netvsc_device *alloc_net_device(void)
{
	struct netvsc_device *net_device;

	net_device = kzalloc(sizeof(struct netvsc_device), GFP_KERNEL);
	if (!net_device)
		return NULL;

	init_waitqueue_head(&net_device->wait_drain);
	net_device->destroy = false;
	net_device->tx_disable = true;

	net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
	net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;

	init_completion(&net_device->channel_init_wait);
	init_waitqueue_head(&net_device->subchan_open);
	INIT_WORK(&net_device->subchan_work, netvsc_subchan_work);

	return net_device;
}

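/* RCU callback that frees the receive/send buffers, the send section map
 * and the per-channel state once no readers reference the device.
 */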
static void free_netvsc_device(struct rcu_head *head)
{
	struct netvsc_device *nvdev
		= container_of(head, struct netvsc_device, rcu);
	int i;

	kfree(nvdev->extension);
	vfree(nvdev->recv_buf);
	vfree(nvdev->send_buf);
	kfree(nvdev->send_section_map);

	for (i = 0; i < VRSS_CHANNEL_MAX; i++) {
		xdp_rxq_info_unreg(&nvdev->chan_table[i].xdp_rxq);
		vfree(nvdev->chan_table[i].mrc.slots);
	}

	kfree(nvdev);
}

static void free_netvsc_device_rcu(struct netvsc_device *nvdev)
{
	call_rcu(&nvdev->rcu, free_netvsc_device);
}

static void netvsc_revoke_recv_buf(struct hv_device *device,
				   struct netvsc_device *net_device,
				   struct net_device *ndev)
{
	struct nvsp_message *revoke_packet;
	int ret;

	/*
	 * If we got a section count, it means we received a
	 * SendReceiveBufferComplete msg (i.e. we sent a
	 * NvspMessage1TypeSendReceiveBuffer msg), so we need
	 * to send a revoke msg here.
	 */
	if (net_device->recv_section_cnt) {
		/* Send the revoke receive buffer */
		revoke_packet = &net_device->revoke_packet;
		memset(revoke_packet, 0, sizeof(struct nvsp_message));

		revoke_packet->hdr.msg_type =
			NVSP_MSG1_TYPE_REVOKE_RECV_BUF;
		revoke_packet->msg.v1_msg.
			revoke_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;

		trace_nvsp_send(ndev, revoke_packet);

		ret = vmbus_sendpacket(device->channel,
				       revoke_packet,
				       sizeof(struct nvsp_message),
				       (unsigned long)revoke_packet,
				       VM_PKT_DATA_INBAND, 0);
		/* If the failure is because the channel is rescinded,
		 * ignore it since we cannot send on a rescinded
		 * channel. This allows us to clean up properly
		 * even when the channel is rescinded.
		 */
		if (device->channel->rescind)
			ret = 0;
		/*
		 * If we failed here, we might as well return and
		 * have a leak rather than continue and hit a bugcheck.
		 */
		if (ret != 0) {
			netdev_err(ndev, "unable to send "
				   "revoke receive buffer to netvsp\n");
			return;
		}
		net_device->recv_section_cnt = 0;
	}
}

static void netvsc_revoke_send_buf(struct hv_device *device,
				   struct netvsc_device *net_device,
				   struct net_device *ndev)
{
	struct nvsp_message *revoke_packet;
	int ret;

	/* Deal with the send buffer we may have set up.
	 * If we got a send section size, it means we received a
	 * NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE msg (i.e. we sent a
	 * NVSP_MSG1_TYPE_SEND_SEND_BUF msg), so we need
	 * to send a revoke msg here.
	 */
	if (net_device->send_section_cnt) {
		/* Send the revoke send buffer */
		revoke_packet = &net_device->revoke_packet;
		memset(revoke_packet, 0, sizeof(struct nvsp_message));

		revoke_packet->hdr.msg_type =
			NVSP_MSG1_TYPE_REVOKE_SEND_BUF;
		revoke_packet->msg.v1_msg.revoke_send_buf.id =
			NETVSC_SEND_BUFFER_ID;

		trace_nvsp_send(ndev, revoke_packet);

		ret = vmbus_sendpacket(device->channel,
				       revoke_packet,
				       sizeof(struct nvsp_message),
				       (unsigned long)revoke_packet,
				       VM_PKT_DATA_INBAND, 0);

		/* If the failure is because the channel is rescinded,
		 * ignore it since we cannot send on a rescinded
		 * channel. This allows us to clean up properly
		 * even when the channel is rescinded.
		 */
		if (device->channel->rescind)
			ret = 0;

		/* If we failed here, we might as well return and
		 * have a leak rather than continue and hit a bugcheck.
		 */
		if (ret != 0) {
			netdev_err(ndev, "unable to send "
				   "revoke send buffer to netvsp\n");
			return;
		}
		net_device->send_section_cnt = 0;
	}
}

static void netvsc_teardown_recv_gpadl(struct hv_device *device,
				       struct netvsc_device *net_device,
				       struct net_device *ndev)
{
	int ret;

	if (net_device->recv_buf_gpadl_handle) {
		ret = vmbus_teardown_gpadl(device->channel,
					   net_device->recv_buf_gpadl_handle);

		/* If we failed here, we might as well return and have a leak
		 * rather than continue and hit a bugcheck.
		 */
		if (ret != 0) {
			netdev_err(ndev,
				   "unable to teardown receive buffer's gpadl\n");
			return;
		}
		net_device->recv_buf_gpadl_handle = 0;
	}
}

static void netvsc_teardown_send_gpadl(struct hv_device *device,
				       struct netvsc_device *net_device,
				       struct net_device *ndev)
{
	int ret;

	if (net_device->send_buf_gpadl_handle) {
		ret = vmbus_teardown_gpadl(device->channel,
					   net_device->send_buf_gpadl_handle);

		/* If we failed here, we might as well return and have a leak
		 * rather than continue and hit a bugcheck.
		 */
		if (ret != 0) {
			netdev_err(ndev,
				   "unable to teardown send buffer's gpadl\n");
			return;
		}
		net_device->send_buf_gpadl_handle = 0;
	}
}

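/* Allocate the receive completion slot ring for a channel, preferring
 * memory on the NUMA node of the channel's target CPU and falling back
 * to any node if that allocation fails.
 */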
int netvsc_alloc_recv_comp_ring(struct netvsc_device *net_device, u32 q_idx)
{
	struct netvsc_channel *nvchan = &net_device->chan_table[q_idx];
	int node = cpu_to_node(nvchan->channel->target_cpu);
	size_t size;

	size = net_device->recv_completion_cnt * sizeof(struct recv_comp_data);
	nvchan->mrc.slots = vzalloc_node(size, node);
	if (!nvchan->mrc.slots)
		nvchan->mrc.slots = vzalloc(size);

	return nvchan->mrc.slots ? 0 : -ENOMEM;
}

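/* Allocate the receive and send buffers, register them with the host
 * through GPADLs, and exchange the NVSP messages that describe how the
 * buffers are divided into sections.
 */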
static int netvsc_init_buf(struct hv_device *device,
			   struct netvsc_device *net_device,
			   const struct netvsc_device_info *device_info)
{
	struct nvsp_1_message_send_receive_buffer_complete *resp;
	struct net_device *ndev = hv_get_drvdata(device);
	struct nvsp_message *init_packet;
	unsigned int buf_size;
	size_t map_words;
	int ret = 0;

	/* Get receive buffer area. */
	buf_size = device_info->recv_sections * device_info->recv_section_size;
	buf_size = roundup(buf_size, PAGE_SIZE);

	/* Legacy hosts only allow smaller receive buffer */
	if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_2)
		buf_size = min_t(unsigned int, buf_size,
				 NETVSC_RECEIVE_BUFFER_SIZE_LEGACY);

	net_device->recv_buf = vzalloc(buf_size);
	if (!net_device->recv_buf) {
		netdev_err(ndev,
			   "unable to allocate receive buffer of size %u\n",
			   buf_size);
		ret = -ENOMEM;
		goto cleanup;
	}

	net_device->recv_buf_size = buf_size;

	/*
	 * Establish the gpadl handle for this buffer on this
	 * channel. Note: This call uses the vmbus connection rather
	 * than the channel to establish the gpadl handle.
	 */
	ret = vmbus_establish_gpadl(device->channel, net_device->recv_buf,
				    buf_size,
				    &net_device->recv_buf_gpadl_handle);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to establish receive buffer's gpadl\n");
		goto cleanup;
	}

	/* Notify the NetVsp of the gpadl handle */
	init_packet = &net_device->channel_init_pkt;
	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_RECV_BUF;
	init_packet->msg.v1_msg.send_recv_buf.
		gpadl_handle = net_device->recv_buf_gpadl_handle;
	init_packet->msg.v1_msg.
		send_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;

	trace_nvsp_send(ndev, init_packet);

	/* Send the gpadl notification request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to send receive buffer's gpadl to netvsp\n");
		goto cleanup;
	}

	wait_for_completion(&net_device->channel_init_wait);

	/* Check the response */
	resp = &init_packet->msg.v1_msg.send_recv_buf_complete;
	if (resp->status != NVSP_STAT_SUCCESS) {
		netdev_err(ndev,
			   "Unable to complete receive buffer initialization with NetVsp - status %d\n",
			   resp->status);
		ret = -EINVAL;
		goto cleanup;
	}

	/* Parse the response */
	netdev_dbg(ndev, "Receive sections: %u sub_allocs: size %u count: %u\n",
		   resp->num_sections, resp->sections[0].sub_alloc_size,
		   resp->sections[0].num_sub_allocs);

	/* There should only be one section for the entire receive buffer */
	if (resp->num_sections != 1 || resp->sections[0].offset != 0) {
		ret = -EINVAL;
		goto cleanup;
	}

	net_device->recv_section_size = resp->sections[0].sub_alloc_size;
	net_device->recv_section_cnt = resp->sections[0].num_sub_allocs;

	/* Ensure buffer will not overflow */
	if (net_device->recv_section_size < NETVSC_MTU_MIN ||
	    (u64)net_device->recv_section_size *
	    (u64)net_device->recv_section_cnt > (u64)buf_size) {
		netdev_err(ndev, "invalid recv_section_size %u\n",
			   net_device->recv_section_size);
		ret = -EINVAL;
		goto cleanup;
	}

	/* Setup receive completion ring.
	 * Add 1 to the recv_section_cnt because at least one entry in a
	 * ring buffer has to be empty.
	 */
	net_device->recv_completion_cnt = net_device->recv_section_cnt + 1;
	ret = netvsc_alloc_recv_comp_ring(net_device, 0);
	if (ret)
		goto cleanup;

	/* Now setup the send buffer. */
	buf_size = device_info->send_sections * device_info->send_section_size;
	buf_size = round_up(buf_size, PAGE_SIZE);

	net_device->send_buf = vzalloc(buf_size);
	if (!net_device->send_buf) {
		netdev_err(ndev, "unable to allocate send buffer of size %u\n",
			   buf_size);
		ret = -ENOMEM;
		goto cleanup;
	}

	/* Establish the gpadl handle for this buffer on this
	 * channel. Note: This call uses the vmbus connection rather
	 * than the channel to establish the gpadl handle.
	 */
	ret = vmbus_establish_gpadl(device->channel, net_device->send_buf,
				    buf_size,
				    &net_device->send_buf_gpadl_handle);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to establish send buffer's gpadl\n");
		goto cleanup;
	}

	/* Notify the NetVsp of the gpadl handle */
	init_packet = &net_device->channel_init_pkt;
	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_SEND_BUF;
	init_packet->msg.v1_msg.send_send_buf.gpadl_handle =
		net_device->send_buf_gpadl_handle;
	init_packet->msg.v1_msg.send_send_buf.id = NETVSC_SEND_BUFFER_ID;

	trace_nvsp_send(ndev, init_packet);

	/* Send the gpadl notification request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to send send buffer's gpadl to netvsp\n");
		goto cleanup;
	}

	wait_for_completion(&net_device->channel_init_wait);

	/* Check the response */
	if (init_packet->msg.v1_msg.
	    send_send_buf_complete.status != NVSP_STAT_SUCCESS) {
		netdev_err(ndev, "Unable to complete send buffer "
			   "initialization with NetVsp - status %d\n",
			   init_packet->msg.v1_msg.
			   send_send_buf_complete.status);
		ret = -EINVAL;
		goto cleanup;
	}

	/* Parse the response */
	net_device->send_section_size = init_packet->msg.
		v1_msg.send_send_buf_complete.section_size;
	if (net_device->send_section_size < NETVSC_MTU_MIN) {
		netdev_err(ndev, "invalid send_section_size %u\n",
			   net_device->send_section_size);
		ret = -EINVAL;
		goto cleanup;
	}

	/* Section count is simply the size divided by the section size. */
	net_device->send_section_cnt = buf_size / net_device->send_section_size;

	netdev_dbg(ndev, "Send section size: %d, Section count:%d\n",
		   net_device->send_section_size, net_device->send_section_cnt);

	/* Setup state for managing the send buffer. */
	map_words = DIV_ROUND_UP(net_device->send_section_cnt, BITS_PER_LONG);

	net_device->send_section_map = kcalloc(map_words, sizeof(ulong), GFP_KERNEL);
	if (net_device->send_section_map == NULL) {
		ret = -ENOMEM;
		goto cleanup;
	}

	goto exit;

cleanup:
	netvsc_revoke_recv_buf(device, net_device, ndev);
	netvsc_revoke_send_buf(device, net_device, ndev);
	netvsc_teardown_recv_gpadl(device, net_device, ndev);
	netvsc_teardown_send_gpadl(device, net_device, ndev);

exit:
	return ret;
}

/* Negotiate NVSP protocol version */
static int negotiate_nvsp_ver(struct hv_device *device,
			      struct netvsc_device *net_device,
			      struct nvsp_message *init_packet,
			      u32 nvsp_ver)
{
	struct net_device *ndev = hv_get_drvdata(device);
	int ret;

	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG_TYPE_INIT;
	init_packet->msg.init_msg.init.min_protocol_ver = nvsp_ver;
	init_packet->msg.init_msg.init.max_protocol_ver = nvsp_ver;
	trace_nvsp_send(ndev, init_packet);

	/* Send the init request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);

	if (ret != 0)
		return ret;

	wait_for_completion(&net_device->channel_init_wait);

	if (init_packet->msg.init_msg.init_complete.status !=
	    NVSP_STAT_SUCCESS)
		return -EINVAL;

	if (nvsp_ver == NVSP_PROTOCOL_VERSION_1)
		return 0;

	/* NVSPv2 or later: Send NDIS config */
	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG2_TYPE_SEND_NDIS_CONFIG;
	init_packet->msg.v2_msg.send_ndis_config.mtu = ndev->mtu + ETH_HLEN;
	init_packet->msg.v2_msg.send_ndis_config.capability.ieee8021q = 1;

	if (nvsp_ver >= NVSP_PROTOCOL_VERSION_5) {
		init_packet->msg.v2_msg.send_ndis_config.capability.sriov = 1;

		/* Teaming bit is needed to receive link speed updates */
		init_packet->msg.v2_msg.send_ndis_config.capability.teaming = 1;
	}

	if (nvsp_ver >= NVSP_PROTOCOL_VERSION_61)
		init_packet->msg.v2_msg.send_ndis_config.capability.rsc = 1;

	trace_nvsp_send(ndev, init_packet);

	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND, 0);

	return ret;
}

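/* Negotiate the highest NVSP protocol version supported by both ends,
 * report the NDIS version to the host, and set up the data buffers.
 */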
static int netvsc_connect_vsp(struct hv_device *device,
			      struct netvsc_device *net_device,
			      const struct netvsc_device_info *device_info)
{
	struct net_device *ndev = hv_get_drvdata(device);
	static const u32 ver_list[] = {
		NVSP_PROTOCOL_VERSION_1, NVSP_PROTOCOL_VERSION_2,
		NVSP_PROTOCOL_VERSION_4, NVSP_PROTOCOL_VERSION_5,
		NVSP_PROTOCOL_VERSION_6, NVSP_PROTOCOL_VERSION_61
	};
	struct nvsp_message *init_packet;
	int ndis_version, i, ret;

	init_packet = &net_device->channel_init_pkt;

	/* Negotiate the latest NVSP protocol supported */
	for (i = ARRAY_SIZE(ver_list) - 1; i >= 0; i--)
		if (negotiate_nvsp_ver(device, net_device, init_packet,
				       ver_list[i]) == 0) {
			net_device->nvsp_version = ver_list[i];
			break;
		}

	if (i < 0) {
		ret = -EPROTO;
		goto cleanup;
	}

	pr_debug("Negotiated NVSP version:%x\n", net_device->nvsp_version);

	/* Send the ndis version */
	memset(init_packet, 0, sizeof(struct nvsp_message));

	if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_4)
		ndis_version = 0x00060001;
	else
		ndis_version = 0x0006001e;

	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_NDIS_VER;
	init_packet->msg.v1_msg.
		send_ndis_ver.ndis_major_ver =
			(ndis_version & 0xFFFF0000) >> 16;
	init_packet->msg.v1_msg.
		send_ndis_ver.ndis_minor_ver =
			ndis_version & 0xFFFF;

	trace_nvsp_send(ndev, init_packet);

	/* Send the init request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND, 0);
	if (ret != 0)
		goto cleanup;

	ret = netvsc_init_buf(device, net_device, device_info);

cleanup:
	return ret;
}

/*
 * netvsc_device_remove - Callback when the root bus device is removed
 */
void netvsc_device_remove(struct hv_device *device)
{
	struct net_device *ndev = hv_get_drvdata(device);
	struct net_device_context *net_device_ctx = netdev_priv(ndev);
	struct netvsc_device *net_device
		= rtnl_dereference(net_device_ctx->nvdev);
	int i;

	/*
	 * Revoke receive buffer. If host is pre-Win2016 then tear down
	 * receive buffer GPADL. Do the same for send buffer.
	 */
	netvsc_revoke_recv_buf(device, net_device, ndev);
	if (vmbus_proto_version < VERSION_WIN10)
		netvsc_teardown_recv_gpadl(device, net_device, ndev);

	netvsc_revoke_send_buf(device, net_device, ndev);
	if (vmbus_proto_version < VERSION_WIN10)
		netvsc_teardown_send_gpadl(device, net_device, ndev);

	RCU_INIT_POINTER(net_device_ctx->nvdev, NULL);

	/* Disable NAPI and disassociate its context from the device. */
	for (i = 0; i < net_device->num_chn; i++) {
		/* See also vmbus_reset_channel_cb(). */
		napi_disable(&net_device->chan_table[i].napi);
		netif_napi_del(&net_device->chan_table[i].napi);
	}

	/*
	 * At this point, no one should be accessing net_device
	 * except in here.
	 */
	netdev_dbg(ndev, "net device safe to remove\n");

	/* Now, we can close the channel safely */
	vmbus_close(device->channel);

	/*
	 * If host is Win2016 or higher then we do the GPADL tear down
	 * here after VMBus is closed.
	 */
	if (vmbus_proto_version >= VERSION_WIN10) {
		netvsc_teardown_recv_gpadl(device, net_device, ndev);
		netvsc_teardown_send_gpadl(device, net_device, ndev);
	}

	/* Release all resources */
	free_netvsc_device_rcu(net_device);
}

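/* Thresholds, in percent of ring buffer space available to write, used
 * for transmit queue flow control (see netvsc_send_tx_complete() below).
 */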
#define RING_AVAIL_PERCENT_HIWATER 20
#define RING_AVAIL_PERCENT_LOWATER 10

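/* Release a send buffer section by clearing its bit in the section map */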
static inline void netvsc_free_send_slot(struct netvsc_device *net_device,
					 u32 index)
{
	sync_change_bit(index, net_device->send_section_map);
}

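/* Handle the completion of an RNDIS packet send: release the send buffer
 * section, update per-queue transmit statistics, free the skb, and wake
 * the transmit queue if enough ring space is available again.
 */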
static void netvsc_send_tx_complete(struct net_device *ndev,
				    struct netvsc_device *net_device,
				    struct vmbus_channel *channel,
				    const struct vmpacket_descriptor *desc,
				    int budget)
{
	struct sk_buff *skb = (struct sk_buff *)(unsigned long)desc->trans_id;
	struct net_device_context *ndev_ctx = netdev_priv(ndev);
	u16 q_idx = 0;
	int queue_sends;

	/* Notify the layer above us */
	if (likely(skb)) {
		const struct hv_netvsc_packet *packet
			= (struct hv_netvsc_packet *)skb->cb;
		u32 send_index = packet->send_buf_index;
		struct netvsc_stats *tx_stats;

		if (send_index != NETVSC_INVALID_INDEX)
			netvsc_free_send_slot(net_device, send_index);
		q_idx = packet->q_idx;

		tx_stats = &net_device->chan_table[q_idx].tx_stats;

		u64_stats_update_begin(&tx_stats->syncp);
		tx_stats->packets += packet->total_packets;
		tx_stats->bytes += packet->total_bytes;
		u64_stats_update_end(&tx_stats->syncp);

		napi_consume_skb(skb, budget);
	}

	queue_sends =
		atomic_dec_return(&net_device->chan_table[q_idx].queue_sends);

	if (unlikely(net_device->destroy)) {
		if (queue_sends == 0)
			wake_up(&net_device->wait_drain);
	} else {
		struct netdev_queue *txq = netdev_get_tx_queue(ndev, q_idx);

		if (netif_tx_queue_stopped(txq) && !net_device->tx_disable &&
		    (hv_get_avail_to_write_percent(&channel->outbound) >
		     RING_AVAIL_PERCENT_HIWATER || queue_sends < 1)) {
			netif_tx_wake_queue(txq);
			ndev_ctx->eth_stats.wake_queue++;
		}
	}
}

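/* Dispatch an NVSP completion message. Each case first checks that the
 * packet is large enough for its message body; init, buffer and
 * subchannel completions are copied back and wake the channel-init
 * waiter, while RNDIS packet completions go to netvsc_send_tx_complete().
 */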
static void netvsc_send_completion(struct net_device *ndev,
				   struct netvsc_device *net_device,
				   struct vmbus_channel *incoming_channel,
				   const struct vmpacket_descriptor *desc,
				   int budget)
{
	const struct nvsp_message *nvsp_packet = hv_pkt_data(desc);
	u32 msglen = hv_pkt_datalen(desc);

	/* Ensure packet is big enough to read header fields */
	if (msglen < sizeof(struct nvsp_message_header)) {
		netdev_err(ndev, "nvsp_message length too small: %u\n", msglen);
		return;
	}

	switch (nvsp_packet->hdr.msg_type) {
	case NVSP_MSG_TYPE_INIT_COMPLETE:
		if (msglen < sizeof(struct nvsp_message_header) +
				sizeof(struct nvsp_message_init_complete)) {
			netdev_err(ndev, "nvsp_msg length too small: %u\n",
				   msglen);
			return;
		}
		fallthrough;

	case NVSP_MSG1_TYPE_SEND_RECV_BUF_COMPLETE:
		if (msglen < sizeof(struct nvsp_message_header) +
				sizeof(struct nvsp_1_message_send_receive_buffer_complete)) {
			netdev_err(ndev, "nvsp_msg1 length too small: %u\n",
				   msglen);
			return;
		}
		fallthrough;

	case NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE:
		if (msglen < sizeof(struct nvsp_message_header) +
				sizeof(struct nvsp_1_message_send_send_buffer_complete)) {
			netdev_err(ndev, "nvsp_msg1 length too small: %u\n",
				   msglen);
			return;
		}
		fallthrough;

	case NVSP_MSG5_TYPE_SUBCHANNEL:
		if (msglen < sizeof(struct nvsp_message_header) +
				sizeof(struct nvsp_5_subchannel_complete)) {
			netdev_err(ndev, "nvsp_msg5 length too small: %u\n",
				   msglen);
			return;
		}
		/* Copy the response back */
		memcpy(&net_device->channel_init_pkt, nvsp_packet,
		       sizeof(struct nvsp_message));
		complete(&net_device->channel_init_wait);
		break;

	case NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE:
		netvsc_send_tx_complete(ndev, net_device, incoming_channel,
					desc, budget);
		break;

	default:
		netdev_err(ndev,
			   "Unknown send completion type %d received!!\n",
			   nvsp_packet->hdr.msg_type);
	}
}

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) static u32 netvsc_get_next_send_section(struct netvsc_device *net_device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) unsigned long *map_addr = net_device->send_section_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814)
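/* Scan the send-buffer bitmap for a free section and claim it with an
 * atomic test-and-set, so two CPUs racing here cannot pick the same
 * section.
 */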
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) for_each_clear_bit(i, map_addr, net_device->send_section_cnt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) if (sync_test_and_set_bit(i, map_addr) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) return i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) return NETVSC_INVALID_INDEX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) static void netvsc_copy_to_send_buf(struct netvsc_device *net_device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) unsigned int section_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) u32 pend_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) struct hv_netvsc_packet *packet,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) struct rndis_message *rndis_msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) struct hv_page_buffer *pb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) bool xmit_more)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) char *start = net_device->send_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) char *dest = start + (section_index * net_device->send_section_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) + pend_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) u32 padding = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) u32 page_count = packet->cp_partial ? packet->rmsg_pgcnt :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) packet->page_buf_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) u32 remain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839)
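/* When the stack has more packets coming (xmit_more), pad the RNDIS
 * message up to the device packet alignment so the next batched packet
 * starts at an aligned offset in the same send section.  pkt_align is a
 * power of two, so masking yields the remainder: e.g. with
 * pkt_align = 8 and total_data_buflen = 70, remain = 6 and padding = 2.
 */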
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) /* Add padding */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) remain = packet->total_data_buflen & (net_device->pkt_align - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) if (xmit_more && remain) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) padding = net_device->pkt_align - remain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) rndis_msg->msg_len += padding;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) packet->total_data_buflen += padding;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) for (i = 0; i < page_count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) char *src = phys_to_virt(pb[i].pfn << HV_HYP_PAGE_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) u32 offset = pb[i].offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) u32 len = pb[i].len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) memcpy(dest, (src + offset), len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) dest += len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) if (padding)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) memset(dest, 0, padding);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) static inline int netvsc_send_pkt(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) struct hv_device *device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) struct hv_netvsc_packet *packet,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) struct netvsc_device *net_device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) struct hv_page_buffer *pb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) struct nvsp_message nvmsg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) struct nvsp_1_message_send_rndis_packet *rpkt =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) &nvmsg.msg.v1_msg.send_rndis_pkt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) struct netvsc_channel * const nvchan =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) &net_device->chan_table[packet->q_idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) struct vmbus_channel *out_channel = nvchan->channel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) struct net_device *ndev = hv_get_drvdata(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) struct net_device_context *ndev_ctx = netdev_priv(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) struct netdev_queue *txq = netdev_get_tx_queue(ndev, packet->q_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) u64 req_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) u32 ring_avail = hv_get_avail_to_write_percent(&out_channel->outbound);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) nvmsg.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) if (skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) rpkt->channel_type = 0; /* 0 is RMC_DATA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) rpkt->channel_type = 1; /* 1 is RMC_CONTROL */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) rpkt->send_buf_section_index = packet->send_buf_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) if (packet->send_buf_index == NETVSC_INVALID_INDEX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) rpkt->send_buf_section_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) rpkt->send_buf_section_size = packet->total_data_buflen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892)
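/* Use the skb pointer as the VMBus request id; the send-completion
 * path recovers the skb from the transaction id to free it and update
 * statistics.  Control packets (no skb) get a request id of 0.
 */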
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) req_id = (ulong)skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) if (out_channel->rescind)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) trace_nvsp_send_pkt(ndev, out_channel, rpkt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) if (packet->page_buf_cnt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) if (packet->cp_partial)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) pb += packet->rmsg_pgcnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) ret = vmbus_sendpacket_pagebuffer(out_channel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) pb, packet->page_buf_cnt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) &nvmsg, sizeof(nvmsg),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) req_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) ret = vmbus_sendpacket(out_channel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) &nvmsg, sizeof(nvmsg),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) req_id, VM_PKT_DATA_INBAND,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) if (ret == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) atomic_inc_return(&nvchan->queue_sends);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917)
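/* ring_avail was sampled before this send.  If the ring was already
 * close to full, stop the queue now; it is woken again from the
 * completion path once enough ring space is available (above
 * RING_AVAIL_PERCENT_HIWATER) or all outstanding sends have completed.
 */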
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) if (ring_avail < RING_AVAIL_PERCENT_LOWATER) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) netif_tx_stop_queue(txq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) ndev_ctx->eth_stats.stop_queue++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) } else if (ret == -EAGAIN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) netif_tx_stop_queue(txq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) ndev_ctx->eth_stats.stop_queue++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) netdev_err(ndev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) "Unable to send packet pages %u len %u, ret %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) packet->page_buf_cnt, packet->total_data_buflen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931)
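/* Re-check after the send: if the queue was stopped above but every
 * outstanding send has already completed, wake it here so transmission
 * does not stall waiting for a completion that will never arrive.
 * With the queue awake again, -EAGAIN is converted to -ENOSPC.
 */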
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) if (netif_tx_queue_stopped(txq) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) atomic_read(&nvchan->queue_sends) < 1 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) !net_device->tx_disable) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) netif_tx_wake_queue(txq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) ndev_ctx->eth_stats.wake_queue++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) if (ret == -EAGAIN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) ret = -ENOSPC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) /* Move the packet out of the multi-send data (msd) and clear msd */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) static inline void move_pkt_msd(struct hv_netvsc_packet **msd_send,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) struct sk_buff **msd_skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) struct multi_send_data *msdp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) *msd_skb = msdp->skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) *msd_send = msdp->pkt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) msdp->skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) msdp->pkt = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) msdp->count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) /* RCU already held by caller */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) int netvsc_send(struct net_device *ndev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) struct hv_netvsc_packet *packet,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) struct rndis_message *rndis_msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) struct hv_page_buffer *pb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) bool xdp_tx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) struct net_device_context *ndev_ctx = netdev_priv(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) struct netvsc_device *net_device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) = rcu_dereference_bh(ndev_ctx->nvdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) struct hv_device *device = ndev_ctx->device_ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) struct netvsc_channel *nvchan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) u32 pktlen = packet->total_data_buflen, msd_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) unsigned int section_index = NETVSC_INVALID_INDEX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) struct multi_send_data *msdp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) struct hv_netvsc_packet *msd_send = NULL, *cur_send = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) struct sk_buff *msd_skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) bool try_batch, xmit_more;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) /* If the device is rescinded, return an error; the packet will be dropped. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) if (unlikely(!net_device || net_device->destroy))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) nvchan = &net_device->chan_table[packet->q_idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) packet->send_buf_index = NETVSC_INVALID_INDEX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) packet->cp_partial = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) /* Send a control message or an XDP packet directly, without touching
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) * the msd (Multi-Send Data) field, which may be changed during data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) * packet processing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) if (!skb || xdp_tx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) return netvsc_send_pkt(device, packet, net_device, pb, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) /* batch packets in send buffer if possible */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) msdp = &nvchan->msd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) if (msdp->pkt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) msd_len = msdp->pkt->total_data_buflen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996)
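/* Pick where this packet's data will go, in order of preference:
 * 1) the whole packet fits after the pending data in the current send
 *    section, so append it there;
 * 2) only the RNDIS message portion fits, so copy just that part into
 *    the section (cp_partial) and send the payload via page buffers;
 * 3) nothing is pending (or batching is not possible) and the packet
 *    fits in a section by itself, so claim a fresh section and flush
 *    any previously batched data.
 */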
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) try_batch = msd_len > 0 && msdp->count < net_device->max_pkt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) if (try_batch && msd_len + pktlen + net_device->pkt_align <
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) net_device->send_section_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) section_index = msdp->pkt->send_buf_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) } else if (try_batch && msd_len + packet->rmsg_size <
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) net_device->send_section_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) section_index = msdp->pkt->send_buf_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) packet->cp_partial = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) } else if (pktlen + net_device->pkt_align <
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) net_device->send_section_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) section_index = netvsc_get_next_send_section(net_device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) if (unlikely(section_index == NETVSC_INVALID_INDEX)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) ++ndev_ctx->eth_stats.tx_send_full;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) move_pkt_msd(&msd_send, &msd_skb, msdp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) msd_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) /* Keep aggregating only if the stack indicates more data is coming,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) * this is not a mixed-mode (partial copy) send, and the queue is not flow blocked.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) xmit_more = netdev_xmit_more() &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) !packet->cp_partial &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) !netif_xmit_stopped(netdev_get_tx_queue(ndev, packet->q_idx));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) if (section_index != NETVSC_INVALID_INDEX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) netvsc_copy_to_send_buf(net_device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) section_index, msd_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) packet, rndis_msg, pb, xmit_more);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) packet->send_buf_index = section_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) if (packet->cp_partial) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) packet->page_buf_cnt -= packet->rmsg_pgcnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) packet->total_data_buflen = msd_len + packet->rmsg_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) packet->page_buf_cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) packet->total_data_buflen += msd_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) if (msdp->pkt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) packet->total_packets += msdp->pkt->total_packets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) packet->total_bytes += msdp->pkt->total_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) if (msdp->skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) dev_consume_skb_any(msdp->skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) if (xmit_more) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) msdp->skb = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) msdp->pkt = packet;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) msdp->count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) cur_send = packet;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) msdp->skb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) msdp->pkt = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) msdp->count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) move_pkt_msd(&msd_send, &msd_skb, msdp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) cur_send = packet;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) if (msd_send) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) int m_ret = netvsc_send_pkt(device, msd_send, net_device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) NULL, msd_skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) if (m_ret != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) netvsc_free_send_slot(net_device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) msd_send->send_buf_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) dev_kfree_skb_any(msd_skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) if (cur_send)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) ret = netvsc_send_pkt(device, cur_send, net_device, pb, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) if (ret != 0 && section_index != NETVSC_INVALID_INDEX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) netvsc_free_send_slot(net_device, section_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) /* Send pending recv completions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) static int send_recv_completions(struct net_device *ndev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) struct netvsc_device *nvdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) struct netvsc_channel *nvchan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) struct multi_recv_comp *mrc = &nvchan->mrc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) struct recv_comp_msg {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) struct nvsp_message_header hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) u32 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) } __packed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) struct recv_comp_msg msg = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) .hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097)
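/* Drain the per-channel completion ring: send one RNDIS packet
 * completion to the host for each queued entry.  If vmbus_sendpacket()
 * fails (e.g. the outgoing ring is full), stop and return the error so
 * the remaining entries can be retried on a later pass.
 */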
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) while (mrc->first != mrc->next) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) const struct recv_comp_data *rcd
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) = mrc->slots + mrc->first;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) msg.status = rcd->status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) ret = vmbus_sendpacket(nvchan->channel, &msg, sizeof(msg),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) rcd->tid, VM_PKT_COMP, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) if (unlikely(ret)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) struct net_device_context *ndev_ctx = netdev_priv(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) ++ndev_ctx->eth_stats.rx_comp_busy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) if (++mrc->first == nvdev->recv_completion_cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) mrc->first = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) /* receive completion ring has been emptied */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) if (unlikely(nvdev->destroy))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) wake_up(&nvdev->wait_drain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) /* Compute how many receive-completion slots are filled and how many are still available */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) static void recv_comp_slot_avail(const struct netvsc_device *nvdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) const struct multi_recv_comp *mrc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) u32 *filled, u32 *avail)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) u32 count = nvdev->recv_completion_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129)
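/* Slots first..next-1 (modulo count) are occupied.  One slot is always
 * kept empty so that first == next unambiguously means "ring empty".
 * Example: count = 8, first = 6, next = 2 -> filled = (8 - 6) + 2 = 4,
 * avail = 8 - 4 - 1 = 3.
 */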
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) if (mrc->next >= mrc->first)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) *filled = mrc->next - mrc->first;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) *filled = (count - mrc->first) + mrc->next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) *avail = count - *filled - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) /* Add a receive completion to the ring, to be sent to the host. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) static void enq_receive_complete(struct net_device *ndev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) struct netvsc_device *nvdev, u16 q_idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) u64 tid, u32 status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) struct netvsc_channel *nvchan = &nvdev->chan_table[q_idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) struct multi_recv_comp *mrc = &nvchan->mrc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) struct recv_comp_data *rcd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) u32 filled, avail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) recv_comp_slot_avail(nvdev, mrc, &filled, &avail);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149)
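/* If more completions are queued than a single NAPI poll can add, try
 * to flush them to the host before queueing another one.
 */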
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) if (unlikely(filled > NAPI_POLL_WEIGHT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) send_recv_completions(ndev, nvdev, nvchan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) recv_comp_slot_avail(nvdev, mrc, &filled, &avail);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) if (unlikely(!avail)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) netdev_err(ndev, "Recv_comp full buf q:%hu, tid:%llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) q_idx, tid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) rcd = mrc->slots + mrc->next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) rcd->tid = tid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) rcd->status = status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) if (++mrc->next == nvdev->recv_completion_cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) mrc->next = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) static int netvsc_receive(struct net_device *ndev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) struct netvsc_device *net_device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) struct netvsc_channel *nvchan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) const struct vmpacket_descriptor *desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) struct net_device_context *net_device_ctx = netdev_priv(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) struct vmbus_channel *channel = nvchan->channel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) const struct vmtransfer_page_packet_header *vmxferpage_packet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) = container_of(desc, const struct vmtransfer_page_packet_header, d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) const struct nvsp_message *nvsp = hv_pkt_data(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) u32 msglen = hv_pkt_datalen(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) u16 q_idx = channel->offermsg.offer.sub_channel_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) char *recv_buf = net_device->recv_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) u32 status = NVSP_STAT_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) int count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) /* Ensure packet is big enough to read header fields */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) if (msglen < sizeof(struct nvsp_message_header)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) netif_err(net_device_ctx, rx_err, ndev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) "invalid nvsp header, length too small: %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) msglen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) /* Make sure this is a valid nvsp packet */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) if (unlikely(nvsp->hdr.msg_type != NVSP_MSG1_TYPE_SEND_RNDIS_PKT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) netif_err(net_device_ctx, rx_err, ndev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) "Unknown nvsp packet type received %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) nvsp->hdr.msg_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) /* Validate xfer page pkt header */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) if ((desc->offset8 << 3) < sizeof(struct vmtransfer_page_packet_header)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) netif_err(net_device_ctx, rx_err, ndev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) "Invalid xfer page pkt, offset too small: %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) desc->offset8 << 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) if (unlikely(vmxferpage_packet->xfer_pageset_id != NETVSC_RECEIVE_BUFFER_ID)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) netif_err(net_device_ctx, rx_err, ndev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) "Invalid xfer page set id - expecting %x got %x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) NETVSC_RECEIVE_BUFFER_ID,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) vmxferpage_packet->xfer_pageset_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) count = vmxferpage_packet->range_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) /* Check count for a valid value */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) if (NETVSC_XFER_HEADER_SIZE(count) > desc->offset8 << 3) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) netif_err(net_device_ctx, rx_err, ndev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) "Range count is not valid: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) /* Each range represents one RNDIS packet containing one Ethernet frame */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) for (i = 0; i < count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) u32 offset = vmxferpage_packet->ranges[i].byte_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) u32 buflen = vmxferpage_packet->ranges[i].byte_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) void *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234)
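/* The host supplies the offset and length of each RNDIS packet within
 * the receive buffer; reject any range that would fall outside it.
 */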
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) if (unlikely(offset > net_device->recv_buf_size ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) buflen > net_device->recv_buf_size - offset)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) nvchan->rsc.cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) status = NVSP_STAT_FAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) netif_err(net_device_ctx, rx_err, ndev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) "Packet offset:%u + len:%u too big\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) offset, buflen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) data = recv_buf + offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) nvchan->rsc.is_last = (i == count - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) trace_rndis_recv(ndev, q_idx, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) /* Pass it to the upper layer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) ret = rndis_filter_receive(ndev, net_device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) nvchan, data, buflen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) if (unlikely(ret != NVSP_STAT_SUCCESS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) /* Drop incomplete packet */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) nvchan->rsc.cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) status = NVSP_STAT_FAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) enq_receive_complete(ndev, net_device, q_idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) vmxferpage_packet->d.trans_id, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) static void netvsc_send_table(struct net_device *ndev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) struct netvsc_device *nvscdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) const struct nvsp_message *nvmsg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) u32 msglen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) struct net_device_context *net_device_ctx = netdev_priv(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) u32 count, offset, *tab;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) /* Ensure packet is big enough to read send_table fields */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) if (msglen < sizeof(struct nvsp_message_header) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) sizeof(struct nvsp_5_send_indirect_table)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) netdev_err(ndev, "nvsp_v5_msg length too small: %u\n", msglen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) count = nvmsg->msg.v5_msg.send_table.count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) offset = nvmsg->msg.v5_msg.send_table.offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) if (count != VRSS_SEND_TAB_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) netdev_err(ndev, "Received wrong send-table size:%u\n", count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) /* If negotiated version <= NVSP_PROTOCOL_VERSION_6, the offset may be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) * wrong due to a host bug. So fix the offset here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) if (nvscdev->nvsp_version <= NVSP_PROTOCOL_VERSION_6 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) msglen >= sizeof(struct nvsp_message_header) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) sizeof(union nvsp_6_message_uber) + count * sizeof(u32))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) offset = sizeof(struct nvsp_message_header) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) sizeof(union nvsp_6_message_uber);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) /* Boundary check for all versions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) if (offset > msglen - count * sizeof(u32)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) netdev_err(ndev, "Received send-table offset too big:%u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) tab = (void *)nvmsg + offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) for (i = 0; i < count; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) net_device_ctx->tx_table[i] = tab[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) static void netvsc_send_vf(struct net_device *ndev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) const struct nvsp_message *nvmsg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) u32 msglen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) struct net_device_context *net_device_ctx = netdev_priv(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) /* Ensure packet is big enough to read its fields */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) if (msglen < sizeof(struct nvsp_message_header) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) sizeof(struct nvsp_4_send_vf_association)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) netdev_err(ndev, "nvsp_v4_msg length too small: %u\n", msglen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) net_device_ctx->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) net_device_ctx->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) netdev_info(ndev, "VF slot %u %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) net_device_ctx->vf_serial,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) net_device_ctx->vf_alloc ? "added" : "removed");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) static void netvsc_receive_inband(struct net_device *ndev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) struct netvsc_device *nvscdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) const struct vmpacket_descriptor *desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) const struct nvsp_message *nvmsg = hv_pkt_data(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) u32 msglen = hv_pkt_datalen(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) /* Ensure packet is big enough to read header fields */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) if (msglen < sizeof(struct nvsp_message_header)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) netdev_err(ndev, "inband nvsp_message length too small: %u\n", msglen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) switch (nvmsg->hdr.msg_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) case NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) netvsc_send_table(ndev, nvscdev, nvmsg, msglen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) case NVSP_MSG4_TYPE_SEND_VF_ASSOCIATION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) netvsc_send_vf(ndev, nvmsg, msglen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) static int netvsc_process_raw_pkt(struct hv_device *device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) struct netvsc_channel *nvchan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) struct netvsc_device *net_device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) struct net_device *ndev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) const struct vmpacket_descriptor *desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) int budget)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) struct vmbus_channel *channel = nvchan->channel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) const struct nvsp_message *nvmsg = hv_pkt_data(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) trace_nvsp_recv(ndev, channel, nvmsg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) switch (desc->type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) case VM_PKT_COMP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) netvsc_send_completion(ndev, net_device, channel, desc, budget);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) case VM_PKT_DATA_USING_XFER_PAGES:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) return netvsc_receive(ndev, net_device, nvchan, desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) case VM_PKT_DATA_INBAND:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) netvsc_receive_inband(ndev, net_device, desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) netdev_err(ndev, "unhandled packet type %d, tid %llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) desc->type, desc->trans_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) static struct hv_device *netvsc_channel_to_device(struct vmbus_channel *channel)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) struct vmbus_channel *primary = channel->primary_channel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) return primary ? primary->device_obj : channel->device_obj;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) /* Network processing softirq.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) * Processes data in the incoming ring buffer from the host.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) * Stops when the ring is empty or the budget is met or exceeded.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) int netvsc_poll(struct napi_struct *napi, int budget)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) struct netvsc_channel *nvchan
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) = container_of(napi, struct netvsc_channel, napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) struct netvsc_device *net_device = nvchan->net_device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) struct vmbus_channel *channel = nvchan->channel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) struct hv_device *device = netvsc_channel_to_device(channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) struct net_device *ndev = hv_get_drvdata(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) int work_done = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) /* If starting a new poll interval, get the first descriptor; otherwise resume where the previous poll left off */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) if (!nvchan->desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) nvchan->desc = hv_pkt_iter_first(channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) while (nvchan->desc && work_done < budget) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) work_done += netvsc_process_raw_pkt(device, nvchan, net_device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) ndev, nvchan->desc, budget);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) nvchan->desc = hv_pkt_iter_next(channel, nvchan->desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) /* Send any pending receive completions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) ret = send_recv_completions(ndev, net_device, nvchan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) /* If the NAPI budget was not exhausted this time
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) * and we are not busy polling,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) * then complete NAPI and re-enable host interrupts,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) * and reschedule if the ring is not empty
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) * or sending a receive completion failed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) if (work_done < budget &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) napi_complete_done(napi, work_done) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) (ret || hv_end_read(&channel->inbound)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) napi_schedule_prep(napi)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) hv_begin_read(&channel->inbound);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) __napi_schedule(napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) /* The driver may overshoot the budget since a single descriptor can contain multiple packets */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) return min(work_done, budget);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) /* Callback invoked when data is available in the host ring buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) * Processing is deferred to the network softirq (NAPI).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) void netvsc_channel_cb(void *context)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) struct netvsc_channel *nvchan = context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) struct vmbus_channel *channel = nvchan->channel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) struct hv_ring_buffer_info *rbi = &channel->inbound;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) /* preload first vmpacket descriptor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) prefetch(hv_get_ring_buffer(rbi) + rbi->priv_read_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) if (napi_schedule_prep(&nvchan->napi)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) /* disable interrupts from host */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) hv_begin_read(rbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) __napi_schedule_irqoff(&nvchan->napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) * netvsc_device_add - Callback when the device belonging to this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) * driver is added
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) struct netvsc_device *netvsc_device_add(struct hv_device *device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) const struct netvsc_device_info *device_info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) int i, ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) struct netvsc_device *net_device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) struct net_device *ndev = hv_get_drvdata(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) struct net_device_context *net_device_ctx = netdev_priv(ndev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) net_device = alloc_net_device();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) if (!net_device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) for (i = 0; i < VRSS_SEND_TAB_SIZE; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) net_device_ctx->tx_table[i] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) /* Because the device uses NAPI, all interrupt batching and control
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) * is done via the NET softirq, not in the channel callback.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) set_channel_read_mode(device->channel, HV_CALL_ISR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) /* If we're reopening the device, we may have multiple queues; fill the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) * chn_table with the default channel so it can be used before the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) * subchannels are opened.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) * Initialize the channel state before we open the channel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) * since we can be interrupted as soon as it is open.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) for (i = 0; i < VRSS_CHANNEL_MAX; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) struct netvsc_channel *nvchan = &net_device->chan_table[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) nvchan->channel = device->channel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) nvchan->net_device = net_device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) u64_stats_init(&nvchan->tx_stats.syncp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) u64_stats_init(&nvchan->rx_stats.syncp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) ret = xdp_rxq_info_reg(&nvchan->xdp_rxq, ndev, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) netdev_err(ndev, "xdp_rxq_info_reg fail: %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) goto cleanup2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) ret = xdp_rxq_info_reg_mem_model(&nvchan->xdp_rxq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) MEM_TYPE_PAGE_SHARED, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) netdev_err(ndev, "xdp reg_mem_model fail: %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) goto cleanup2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) /* Enable NAPI handler before init callbacks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) netif_napi_add(ndev, &net_device->chan_table[0].napi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) netvsc_poll, NAPI_POLL_WEIGHT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) /* Open the channel */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) ret = vmbus_open(device->channel, netvsc_ring_bytes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) netvsc_ring_bytes, NULL, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) netvsc_channel_cb, net_device->chan_table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) if (ret != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) netdev_err(ndev, "unable to open channel: %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) goto cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) /* Channel is opened */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) netdev_dbg(ndev, "hv_netvsc channel opened successfully\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) napi_enable(&net_device->chan_table[0].napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) /* Connect with the NetVsp */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) ret = netvsc_connect_vsp(device, net_device, device_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) if (ret != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) netdev_err(ndev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) "unable to connect to NetVSP - %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) goto close;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) /* Writing the nvdev pointer unblocks netvsc_send(), so make sure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) * chn_table is fully populated before this point.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) rcu_assign_pointer(net_device_ctx->nvdev, net_device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) return net_device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) close:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) RCU_INIT_POINTER(net_device_ctx->nvdev, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) napi_disable(&net_device->chan_table[0].napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) /* Now, we can close the channel safely */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) vmbus_close(device->channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) cleanup:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) netif_napi_del(&net_device->chan_table[0].napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) cleanup2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) free_netvsc_device(&net_device->rcu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) return ERR_PTR(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) }